Dataset schema (column statistics as reported for this dataset):

| column | dtype | stats |
|---|---|---|
| entry_point | string | lengths 1–65 |
| original_triton_python_code | string | lengths 208–619k |
| optimised_triton_code | string | lengths 1.15k–275k |
| repo_name | string | lengths 7–115 |
| module_name | string | lengths 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | values 0–18.5k |
| licenses | list | lengths 1–6 |
| stars | int64 | values 0–19.8k |
| sha | string | lengths 40–40 |
| repo_link | string | lengths 72–180 |
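The rows below are raw records from this dataset. A minimal sketch of loading and inspecting a row with the `datasets` library, assuming the dataset is published on the Hugging Face Hub; the repo id `your-org/triton-dataset` is a placeholder, not the real name:

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# The repo id below is a placeholder; substitute the actual dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/triton-dataset", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
print(row["original_triton_python_code"][:200])  # source nn.Module code
print(row["optimised_triton_code"][:200])        # Inductor-generated Triton code
```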
---

entry_point: BinaryMarginLoss

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.distributions
import torch.utils.data


class BinaryMarginLoss(nn.Module):

    def __init__(self, margin=0.5):
        super().__init__()
        self.margin = margin

    def forward(self, output):
        return torch.logaddexp(torch.tensor([1.0], device=output.device), self.margin - output)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.distributions
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_logaddexp_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp1 - tmp0
    tmp3 = 1.0
    tmp4 = tmp3 == tmp2
    tmp5 = tl.full([1], False, tl.int1)
    tmp6 = tmp5 & tmp4
    tmp7 = tmp3 >= tmp2
    tmp8 = tl.where(tmp7, tmp3, tmp2)
    tmp9 = tl.where(tmp7, tmp2, tmp3)
    tmp10 = tmp9 - tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = libdevice.log1p(tmp11)
    tmp13 = tmp8 + tmp12
    tmp14 = tl.where(tmp6, tmp3, tmp13)
    tl.store(out_ptr0 + x0, tmp14, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_logaddexp_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class BinaryMarginLossNew(nn.Module):

    def __init__(self, margin=0.5):
        super().__init__()
        self.margin = margin

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: AlexMeinke/Provable-OOD-Detection
module_name: BinaryMarginLoss
synthetic: false
uuid: 7,682
licenses: ["MIT"]
stars: 21
sha: 9a132aec994ff718c96b81885736ab866df60d87
repo_link: https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
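Each record pairs an original `nn.Module` with Inductor-generated Triton code that is meant to be numerically equivalent. A hedged sketch of how a record such as the one above could be checked, assuming both code fields have been executed in the current scope on a CUDA machine (the class and helper names are those from the record; `call()` targets device 0):

```python
# Hedged sketch: compare the original module against its compiled counterpart.
# Assumes the two code fields of the record above have been executed in this
# scope (BinaryMarginLoss, BinaryMarginLossNew, get_inputs, get_init_inputs)
# and that a CUDA device is available.
import torch

init_args, init_kwargs = get_init_inputs()
ref = BinaryMarginLoss(*init_args, **init_kwargs)
opt = BinaryMarginLossNew(*init_args, **init_kwargs)
inputs = [t.cuda() for t in get_inputs()]
torch.testing.assert_close(ref(*inputs), opt(*inputs))
```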
---

entry_point: KDTH

original_triton_python_code:

```python
import torch
from torch import nn
import torch.nn.functional as F


class KDTH(nn.Module):
    """KD with a Teacher Head auxiliary loss"""

    def __init__(self, T=4):
        super(KDTH, self).__init__()
        self.T = T

    def forward(self, y_s, y_t):
        y_s_th = y_s[1]
        y_s = y_s[0]
        p_t = F.softmax(y_t / self.T, dim=1)
        p_s_th = F.log_softmax(y_s_th / self.T, dim=1)
        loss_th = F.kl_div(p_s_th, p_t, size_average=False) * self.T ** 2 / y_s.shape[0]
        return loss_th


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x3, tmp17, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + (64 + x3), xmask)
    tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tl.store(out_ptr0 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x3, tmp13, xmask)


@triton.jit
def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r4 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    r3 = rindex % 64
    tmp0 = tl.load(in_ptr0 + r4, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tmp9 = libdevice.isnan(tmp8).to(tl.int1)
    tmp10 = 0.0
    tmp11 = tmp8 == tmp10
    tmp12 = tl_math.log(tmp8)
    tmp13 = tmp8 * tmp12
    tmp14 = tl.where(tmp11, tmp10, tmp13)
    tmp15 = float('nan')
    tmp16 = tl.where(tmp9, tmp15, tmp14)
    tmp18 = tmp8 * tmp17
    tmp19 = tmp16 - tmp18
    tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
    tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
    tmp23 = 16.0
    tmp24 = tmp22 * tmp23
    tmp25 = 0.25
    tmp26 = tmp24 * tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp26, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(64)](arg0_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_3[grid(1)](buf5, buf0, buf3, 1, 256, num_warps=2, num_stages=1)
        del buf0
        del buf3
    return buf5,


class KDTHNew(nn.Module):
    """KD with a Teacher Head auxiliary loss"""

    def __init__(self, T=4):
        super(KDTHNew, self).__init__()
        self.T = T

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: Alibaba-MIIL/HeadSharingKD
module_name: KDTH
synthetic: false
uuid: 7,683
licenses: ["BSD-2-Clause"]
stars: 15
sha: 8e2738bf069c7d12ec933f9b9107f267f7b6603a
repo_link: https://github.com/Alibaba-MIIL/HeadSharingKD/tree/8e2738bf069c7d12ec933f9b9107f267f7b6603a
---

entry_point: Add_ParamI

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.distributions
import torch.utils.data


class Add_ParamI(nn.Module):

    def __init__(self):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        out = x + self.bias
        return out

    def ibp_forward(self, l, u):
        l_ = l + self.bias
        u_ = u + self.bias
        return l_, u_


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.distributions
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0,


class Add_ParamINew(nn.Module):

    def __init__(self):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(1))

    def ibp_forward(self, l, u):
        l_ = l + self.bias
        u_ = u + self.bias
        return l_, u_

    def forward(self, input_0):
        primals_1 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
```

repo_name: AlexMeinke/Provable-OOD-Detection
module_name: Add_ParamI
synthetic: false
uuid: 7,684
licenses: ["MIT"]
stars: 21
sha: 9a132aec994ff718c96b81885736ab866df60d87
repo_link: https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
---

entry_point: Contraster

original_triton_python_code:

```python
import torch
import torch.distributions
import torch.utils.data


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class Contraster(AdversarialNoiseGenerator):

    def __init__(self, eps):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        eps = self.eps
        s = (x > 1 - eps).float() + torch.clamp(x * (x <= 1 - eps).float() - eps, 0, 1)
        return s - x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'eps': 4}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributions
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_add_clamp_gt_le_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -3.0
    tmp2 = tmp0 > tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp4 = tmp0 <= tmp1
    tmp5 = tmp4.to(tl.float32)
    tmp6 = tmp0 * tmp5
    tmp7 = 4.0
    tmp8 = tmp6 - tmp7
    tmp9 = 0.0
    tmp10 = triton_helpers.maximum(tmp8, tmp9)
    tmp11 = 1.0
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tmp13 = tmp3 + tmp12
    tmp14 = tmp13 - tmp0
    tl.store(out_ptr0 + x0, tmp14, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy_add_clamp_gt_le_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class ContrasterNew(AdversarialNoiseGenerator):

    def __init__(self, eps):
        super().__init__()
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: AlexMeinke/Provable-OOD-Detection
module_name: Contraster
synthetic: false
uuid: 7,685
licenses: ["MIT"]
stars: 21
sha: 9a132aec994ff718c96b81885736ab866df60d87
repo_link: https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
---

entry_point: MLP

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):

    def __init__(self, indim, hs, outdim, mlp_drop):
        super().__init__()
        """
        eh, et, |eh-et|, eh*et
        """
        indim = 4 * indim
        self.linear1 = nn.Linear(indim, 2 * hs)
        self.linear2 = nn.Linear(2 * hs, outdim)
        self.drop = nn.Dropout(mlp_drop)

    def forward(self, head_rep, tail_rep):
        """
        :param head_rep: (?, hs)
        :param tail_rep: (?, hs)
        :param doc_rep: (1, hs)
        :return: logits (?, outdim)
        """
        mlp_input = [head_rep, tail_rep, torch.abs(head_rep - tail_rep), head_rep * tail_rep]
        mlp_input = torch.cat(mlp_input, -1)
        h = self.drop(F.relu(self.linear1(mlp_input)))
        return self.linear2(h)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'indim': 4, 'hs': 4, 'outdim': 4, 'mlp_drop': 0.5}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 - tmp16
    tmp18 = tl_math.abs(tmp17)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp14, tmp18, tmp19)
    tmp21 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp24 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.load(in_ptr1 + (4 * x1 + (-12 + x0)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp24 * tmp25
    tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
    tmp28 = tl.where(tmp21, tmp26, tmp27)
    tmp29 = tl.where(tmp14, tmp20, tmp28)
    tmp30 = tl.where(tmp9, tmp10, tmp29)
    tmp31 = tl.where(tmp4, tmp5, tmp30)
    tl.store(out_ptr0 + x2, tmp31, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (8, 16), (16, 1))
    assert_size_stride(primals_4, (8,), (1,))
    assert_size_stride(primals_5, (4, 8), (8, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(1024)](primals_1, primals_2, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 16), (16, 1), 0), reinterpret_tensor(primals_3, (16, 8), (1, 16), 0), out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 8), (128, 32, 8, 1), 0)
        del buf1
        buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(512)](buf2, primals_4, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
        del primals_6
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 16), (16, 1), 0), reinterpret_tensor(buf2, (64, 8), (8, 1), 0), primals_5, buf4


class MLPNew(nn.Module):

    def __init__(self, indim, hs, outdim, mlp_drop):
        super().__init__()
        """
        eh, et, |eh-et|, eh*et
        """
        indim = 4 * indim
        self.linear1 = nn.Linear(indim, 2 * hs)
        self.linear2 = nn.Linear(2 * hs, outdim)
        self.drop = nn.Dropout(mlp_drop)

    def forward(self, input_0, input_1):
        primals_3 = self.linear1.weight
        primals_4 = self.linear1.bias
        primals_5 = self.linear2.weight
        primals_6 = self.linear2.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0]
```

repo_name: AndrewZhe/Three-Sentences-Are-All-You-Need
module_name: MLP
synthetic: false
uuid: 7,686
licenses: ["MIT"]
stars: 21
sha: afad6f9e700c9a95e03ef200718ebee8e18ca016
repo_link: https://github.com/AndrewZhe/Three-Sentences-Are-All-You-Need/tree/afad6f9e700c9a95e03ef200718ebee8e18ca016
---

entry_point: Conv2dBlock

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)


class AdaptiveInstanceNorm2d(nn.Module):

    def __init__(self, num_features, eps=1e-05, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
        b, c = x.size(0), x.size(1)
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-05, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class SpectralNorm(nn.Module):
    """
    Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida
    and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
    """

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = nn.Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)


class Conv2dBlock(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if norm == 'sn':
            self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)

    def forward(self, x):
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'stride': 1}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2, buf2


def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)


class AdaptiveInstanceNorm2d(nn.Module):

    def __init__(self, num_features, eps=1e-05, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
        b, c = x.size(0), x.size(1)
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var, self.weight, self.bias, True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'


class LayerNorm(nn.Module):

    def __init__(self, num_features, eps=1e-05, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            mean = x.view(-1).mean().view(*shape)
            std = x.view(-1).std().view(*shape)
        else:
            mean = x.view(x.size(0), -1).mean(1).view(*shape)
            std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)
        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


class SpectralNorm(nn.Module):
    """
    Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida
    and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
    """

    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        u = getattr(self.module, self.name + '_u')
        v = getattr(self.module, self.name + '_v')
        w = getattr(self.module, self.name + '_bar')
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        try:
            getattr(self.module, self.name + '_u')
            getattr(self.module, self.name + '_v')
            getattr(self.module, self.name + '_bar')
            return True
        except AttributeError:
            return False

    def _make_params(self):
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = nn.Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + '_u', u)
        self.module.register_parameter(self.name + '_v', v)
        self.module.register_parameter(self.name + '_bar', w_bar)

    def forward(self, *args):
        self._update_u_v()
        return self.module.forward(*args)


class Conv2dBlockNew(nn.Module):

    def __init__(self, input_dim, output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'):
        super(Conv2dBlockNew, self).__init__()
        self.use_bias = True
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, 'Unsupported padding type: {}'.format(pad_type)
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, 'Unsupported normalization: {}'.format(norm)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, 'Unsupported activation: {}'.format(activation)
        if norm == 'sn':
            self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_3 = self.conv.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```

repo_name: AllenPu/mbdg
module_name: Conv2dBlock
synthetic: false
uuid: 7,687
licenses: ["MIT"]
stars: 27
sha: 243f53a57dcf4bfb6e717c0c9f64a839cff8d548
repo_link: https://github.com/AllenPu/mbdg/tree/243f53a57dcf4bfb6e717c0c9f64a839cff8d548
---

entry_point: DistillMSE

original_triton_python_code:

```python
import torch
from torch import nn
import torch.nn.functional as F


class DistillMSE(nn.Module):
    """Distilling the Knowledge in a Neural Network"""

    def __init__(self):
        super(DistillMSE, self).__init__()
        pass

    def forward(self, y_s, y_t):
        loss = nn.MSELoss(reduction='mean')(F.softmax(y_s, dim=1), F.softmax(y_t, dim=1))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_per_fused__softmax_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + r3, None)
    tmp10 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tmp12 = tmp10 + tmp11
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = tmp9 / tmp16
    tmp18 = tmp8 - tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
    tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
    tmp23 = 256.0
    tmp24 = tmp22 / tmp23
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_per_fused__softmax_mse_loss_1[grid(1)](buf4, buf0, buf1, 1, 256, num_warps=2, num_stages=1)
        del buf0
        del buf1
    return buf4,


class DistillMSENew(nn.Module):
    """Distilling the Knowledge in a Neural Network"""

    def __init__(self):
        super(DistillMSENew, self).__init__()
        pass

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: Alibaba-MIIL/HeadSharingKD
module_name: DistillMSE
synthetic: false
uuid: 7,688
licenses: ["BSD-2-Clause"]
stars: 15
sha: 8e2738bf069c7d12ec933f9b9107f267f7b6603a
repo_link: https://github.com/Alibaba-MIIL/HeadSharingKD/tree/8e2738bf069c7d12ec933f9b9107f267f7b6603a
---

entry_point: OELossLogConf

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.distributions
import torch.utils.data


class OELossLogConf(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, confs):
        return -confs.mean(1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.distributions
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = -tmp8
    tl.store(out_ptr0 + x2, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_neg_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class OELossLogConfNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: AlexMeinke/Provable-OOD-Detection
module_name: OELossLogConf
synthetic: false
uuid: 7,689
licenses: ["MIT"]
stars: 21
sha: 9a132aec994ff718c96b81885736ab866df60d87
repo_link: https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
---

entry_point: Fire

original_triton_python_code:

```python
import torch
from torch import nn
from collections import OrderedDict


class Fire(nn.Module):

    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.group1 = nn.Sequential(OrderedDict([
            ('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
            ('squeeze_activation', nn.ReLU(inplace=True))]))
        self.group2 = nn.Sequential(OrderedDict([
            ('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
            ('expand1x1_activation', nn.ReLU(inplace=True))]))
        self.group3 = nn.Sequential(OrderedDict([
            ('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
            ('expand3x3_activation', nn.ReLU(inplace=True))]))

    def forward(self, x):
        x = self.group1(x)
        return torch.cat([self.group2(x), self.group3(x)], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from collections import OrderedDict

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0)
    tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 + tmp16
    tmp18 = triton_helpers.maximum(tmp8, tmp17)
    tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
    tmp20 = tl.where(tmp12, tmp18, tmp19)
    tmp21 = tl.where(tmp4, tmp11, tmp20)
    tl.store(out_ptr0 + x3, tmp21, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf3, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf3
        del primals_7
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf2, primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del primals_5
    return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6


class FireNew(nn.Module):

    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes):
        super(FireNew, self).__init__()
        self.inplanes = inplanes
        self.group1 = nn.Sequential(OrderedDict([
            ('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
            ('squeeze_activation', nn.ReLU(inplace=True))]))
        self.group2 = nn.Sequential(OrderedDict([
            ('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
            ('expand1x1_activation', nn.ReLU(inplace=True))]))
        self.group3 = nn.Sequential(OrderedDict([
            ('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
            ('expand3x3_activation', nn.ReLU(inplace=True))]))

    def forward(self, input_0):
        primals_1 = self.group1.squeeze.weight
        primals_2 = self.group1.squeeze.bias
        primals_4 = self.group2.expand1x1.weight
        primals_5 = self.group2.expand1x1.bias
        primals_6 = self.group3.expand3x3.weight
        primals_7 = self.group3.expand3x3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
```

repo_name: Alibaba-AAIG/Beyond-ImageNet-Attack
module_name: Fire
synthetic: false
uuid: 7,690
licenses: ["MIT"]
stars: 23
sha: c14b4844b64a8035b8fe033a617c0567224a9fa4
repo_link: https://github.com/Alibaba-AAIG/Beyond-ImageNet-Attack/tree/c14b4844b64a8035b8fe033a617c0567224a9fa4
---

entry_point: CTLSTMCell

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class CTLSTMCell(nn.Module):

    def __init__(self, hidden_dim, beta=1.0, device=None):
        super(CTLSTMCell, self).__init__()
        device = device or 'cpu'
        self.device = torch.device(device)
        self.hidden_dim = hidden_dim
        self.linear = nn.Linear(hidden_dim * 2, hidden_dim * 7, bias=True)
        self.beta = beta

    def forward(self, rnn_input, hidden_t_i_minus, cell_t_i_minus, cell_bar_im1):
        dim_of_hidden = rnn_input.dim() - 1
        input_i = torch.cat((rnn_input, hidden_t_i_minus), dim=dim_of_hidden)
        output_i = self.linear(input_i)
        (gate_input, gate_forget, gate_output, gate_pre_c, gate_input_bar,
            gate_forget_bar, gate_decay) = output_i.chunk(7, dim_of_hidden)
        gate_input = torch.sigmoid(gate_input)
        gate_forget = torch.sigmoid(gate_forget)
        gate_output = torch.sigmoid(gate_output)
        gate_pre_c = torch.tanh(gate_pre_c)
        gate_input_bar = torch.sigmoid(gate_input_bar)
        gate_forget_bar = torch.sigmoid(gate_forget_bar)
        gate_decay = F.softplus(gate_decay, beta=self.beta)
        cell_i = gate_forget * cell_t_i_minus + gate_input * gate_pre_c
        cell_bar_i = gate_forget_bar * cell_bar_im1 + gate_input_bar * gate_pre_c
        return cell_i, cell_bar_i, gate_decay, gate_output

    def decay(self, cell_i, cell_bar_i, gate_decay, gate_output, dtime):
        if dtime.dim() < cell_i.dim():
            dtime = dtime.unsqueeze(cell_i.dim() - 1).expand_as(cell_i)
        cell_t_ip1_minus = cell_bar_i + (cell_i - cell_bar_i) * torch.exp(-gate_decay * dtime)
        hidden_t_ip1_minus = gate_output * torch.tanh(cell_t_ip1_minus)
        return cell_t_ip1_minus, hidden_t_ip1_minus


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_dim': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_sigmoid_stack_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1280
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 64
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x3 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (8 + x0 + 28 * x1 + 448 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (4 + x0 + 28 * x1 + 448 * (-4 + x2)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (x0 + 28 * x1 + 448 * (-8 + x2)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 16, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr0 + (20 + x0 + 28 * x1 + 448 * (-12 + x2)), tmp19 & xmask, other=0.0)
    tmp21 = tmp0 >= tmp17
    tl.full([1], 20, tl.int64)
    tmp24 = tl.load(in_ptr0 + (16 + x0 + 28 * x1 + 448 * (-16 + x2)), tmp21 & xmask, other=0.0)
    tmp25 = tl.where(tmp19, tmp20, tmp24)
    tmp26 = tl.where(tmp14, tmp15, tmp25)
    tmp27 = tl.where(tmp9, tmp10, tmp26)
    tmp28 = tl.where(tmp4, tmp5, tmp27)
    tmp29 = tl.sigmoid(tmp28)
    tl.store(in_out_ptr0 + x3, tmp29, xmask)


@triton.jit
def triton_poi_fused_add_mul_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (12 + x0 + 28 * x1), xmask)
    tmp2 = tl.load(in_ptr1 + (256 + x2), xmask)
    tmp3 = tl.load(in_ptr2 + x2, xmask)
    tmp5 = tl.load(in_ptr1 + (512 + x2), xmask)
    tmp8 = tl.load(in_ptr1 + (768 + x2), xmask)
    tmp9 = tl.load(in_ptr3 + x2, xmask)
    tmp11 = tl.load(in_ptr1 + (1024 + x2), xmask)
    tmp1 = libdevice.tanh(tmp0)
    tmp4 = tmp2 * tmp3
    tmp6 = tmp5 * tmp1
    tmp7 = tmp4 + tmp6
    tmp10 = tmp8 * tmp9
    tmp12 = tmp11 * tmp1
    tmp13 = tmp10 + tmp12
    tl.store(out_ptr0 + x2, tmp1, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)
    tl.store(out_ptr2 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_softplus_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (24 + x0 + 28 * x1), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = 20.0
    tmp4 = tmp2 > tmp3
    tmp5 = tl_math.exp(tmp2)
    tmp6 = libdevice.log1p(tmp5)
    tmp7 = tmp6 * tmp1
    tmp8 = tl.where(tmp4, tmp0, tmp7)
    tl.store(out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr1 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (28, 8), (8, 1))
    assert_size_stride(primals_4, (28,), (1,))
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 28), (28, 1), torch.float32)
        extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 28), (1, 8), 0), alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((20, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = reinterpret_tensor(buf2, (5, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0)
        del buf2
        triton_poi_fused_sigmoid_stack_1[grid(1280)](buf3, buf1, 1280, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_tanh_2[grid(256)](buf1, buf3, primals_5, primals_6, buf4, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_softplus_3[grid(256)](buf1, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
    return (buf7, buf8, buf6, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        primals_5, primals_6, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf3,
        reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 512),
        reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 1024), buf4, buf5)


class CTLSTMCellNew(nn.Module):

    def __init__(self, hidden_dim, beta=1.0, device=None):
        super(CTLSTMCellNew, self).__init__()
        device = device or 'cpu'
        self.device = torch.device(device)
        self.hidden_dim = hidden_dim
        self.linear = nn.Linear(hidden_dim * 2, hidden_dim * 7, bias=True)
        self.beta = beta

    def decay(self, cell_i, cell_bar_i, gate_decay, gate_output, dtime):
        if dtime.dim() < cell_i.dim():
            dtime = dtime.unsqueeze(cell_i.dim() - 1).expand_as(cell_i)
        cell_t_ip1_minus = cell_bar_i + (cell_i - cell_bar_i) * torch.exp(-gate_decay * dtime)
        hidden_t_ip1_minus = gate_output * torch.tanh(cell_t_ip1_minus)
        return cell_t_ip1_minus, hidden_t_ip1_minus

    def forward(self, input_0, input_1, input_2, input_3):
        primals_3 = self.linear.weight
        primals_4 = self.linear.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_5 = input_2
        primals_6 = input_3
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0], output[1], output[2], output[3]
```

repo_name: Anirudh-Murali/neural-hawkes-particle-smoothing
module_name: CTLSTMCell
synthetic: false
uuid: 7,691
licenses: ["BSD-3-Clause"]
stars: 37
sha: 96b258838bdab33b781008daeedfa61dec0d553c
repo_link: https://github.com/Anirudh-Murali/neural-hawkes-particle-smoothing/tree/96b258838bdab33b781008daeedfa61dec0d553c
---

entry_point: NormalNoiseGenerator

original_triton_python_code:

```python
import torch
import torch.distributions
import torch.utils.data


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class NormalNoiseGenerator(AdversarialNoiseGenerator):

    def __init__(self, sigma=1.0, mu=0):
        super().__init__()
        self.sigma = sigma
        self.mu = mu

    def forward(self, x):
        return self.sigma * torch.randn_like(x) + self.mu


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributions
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = 0.0
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2,


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class NormalNoiseGeneratorNew(AdversarialNoiseGenerator):

    def __init__(self, sigma=1.0, mu=0):
        super().__init__()
        self.sigma = sigma
        self.mu = mu

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: AlexMeinke/Provable-OOD-Detection
module_name: NormalNoiseGenerator
synthetic: false
uuid: 7,692
licenses: ["MIT"]
stars: 21
sha: 9a132aec994ff718c96b81885736ab866df60d87
repo_link: https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
GOODLoss
import torch
import torch.nn as nn
import torch.distributions
import torch.utils.data


class GOODLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, ub_log_conf):
        return (ub_log_conf ** 2 / 2).log1p()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_log1p_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = libdevice.log1p(tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_log1p_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class GOODLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
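A minimal equivalence check between the eager loss and the fused kernel (a sketch assuming a CUDA device; the kernel computes log1p(x ** 2 / 2) elementwise, so the two paths should agree to float32 tolerance):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(GOODLoss()(x), GOODLossNew()(x), atol=1e-6)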
AlexMeinke/Provable-OOD-Detection
GOODLoss
false
7,693
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
Lowerer
import torch
import torch.distributions
import torch.utils.data


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class Lowerer(AdversarialNoiseGenerator):

    def __init__(self, eps):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        eps = self.eps
        s = torch.clamp(x - eps, 0, 1)
        return s - x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'eps': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 1.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp6 - tmp0
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class LowererNew(AdversarialNoiseGenerator):

    def __init__(self, eps):
        super().__init__()
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
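A minimal equivalence sketch for this pair (assumes a CUDA device; note the traced kernel hard-codes eps=4.0 from get_init_inputs, so LowererNew only matches the eager module for that value):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(Lowerer(eps=4)(x), LowererNew(eps=4)(x), atol=1e-6)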
AlexMeinke/Provable-OOD-Detection
Lowerer
false
7,694
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
LinearI_Neg
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions
import torch.utils.data


class LinearI_Neg(nn.Linear):

    def forward(self, x):
        return F.linear(x, -self.weight.exp(), self.bias)

    def ibp_forward(self, l, u):
        weight = -self.weight.exp()
        if self.bias is not None:
            l_ = (weight.clamp(min=0) @ l.t() + weight.clamp(max=0) @ u.t() + self.bias[:, None]).t()
            u_ = (weight.clamp(min=0) @ u.t() + weight.clamp(max=0) @ l.t() + self.bias[:, None]).t()
        else:
            l_ = (weight.clamp(min=0) @ l.t() + weight.clamp(max=0) @ u.t()).t()
            u_ = (weight.clamp(min=0) @ u.t() + weight.clamp(max=0) @ l.t()).t()
        return l_, u_


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_exp_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = -tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_neg_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del buf0
        del primals_2
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class LinearI_NegNew(nn.Linear):

    def ibp_forward(self, l, u):
        weight = -self.weight.exp()
        if self.bias is not None:
            l_ = (weight.clamp(min=0) @ l.t() + weight.clamp(max=0) @ u.t() + self.bias[:, None]).t()
            u_ = (weight.clamp(min=0) @ u.t() + weight.clamp(max=0) @ l.t() + self.bias[:, None]).t()
        else:
            l_ = (weight.clamp(min=0) @ l.t() + weight.clamp(max=0) @ u.t()).t()
            u_ = (weight.clamp(min=0) @ u.t() + weight.clamp(max=0) @ l.t()).t()
        return l_, u_

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
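A minimal equivalence sketch for this pair (assumes a CUDA device; the weights of the two modules are initialized independently, so the state dict is copied across before comparing):

import torch

torch.manual_seed(0)
eager = LinearI_Neg(4, 4).cuda()
compiled = LinearI_NegNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)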
AlexMeinke/Provable-OOD-Detection
LinearI_Neg
false
7,695
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
KDETH
import torch
from torch import nn
import torch.nn.functional as F


class KDTH(nn.Module):
    """KD with a Teacher Head auxiliary loss"""

    def __init__(self, T=4):
        super(KDTH, self).__init__()
        self.T = T

    def forward(self, y_s, y_t):
        y_s_th = y_s[1]
        y_s = y_s[0]
        p_t = F.softmax(y_t / self.T, dim=1)
        p_s_th = F.log_softmax(y_s_th / self.T, dim=1)
        loss_th = F.kl_div(p_s_th, p_t, size_average=False) * self.T ** 2 / y_s.shape[0]
        return loss_th


class KDE(nn.Module):
    """KD on embeddings - KDE"""

    def __init__(self):
        super(KDE, self).__init__()

    def forward(self, embedding_s, embedding_t):
        inputs_embed = F.normalize(embedding_s, p=2.0, dim=1)
        targets_embed = F.normalize(embedding_t, p=2.0, dim=1)
        loss_kde = nn.MSELoss(reduction='sum')(inputs_embed, targets_embed)
        return loss_kde


class KDETH(nn.Module):
    """Combination of KDE and TH"""

    def __init__(self, T=4, th_weight=1.0):
        super(KDETH, self).__init__()
        self.kde = KDE()
        self.kdth = KDTH(T=T)
        self.th_weight = th_weight

    def forward(self, y_s, y_t, embedding_s, embedding_t):
        loss_kde = self.kde(embedding_s, embedding_t)
        loss_kdth = self.kdth(y_s, y_t)
        loss = loss_kde + self.th_weight * loss_kdth
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x3, tmp17, xmask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + (64 + x3), xmask)
    tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.25
    tmp16 = tmp14 * tmp15
    tl.store(out_ptr0 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x3, tmp13, xmask)


@triton.jit
def triton_per_fused__log_softmax__softmax_add_div_mse_loss_mul_sub_sum_xlogy_3(
        in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex
    r0 = rindex % 16
    r2 = rindex // 64
    r4 = rindex % 64
    tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + r3, None)
    tmp17 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr2 + r3, None)
    tmp37 = tl.load(in_ptr2 + (r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp38 = tl.load(in_ptr2 + (16 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr2 + (32 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr2 + (48 + r0 + 64 * r2), None, eviction_policy='evict_last')
    tmp53 = tl.load(in_ptr3 + r4, None, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tmp18 = tmp17 * tmp17
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = libdevice.sqrt(tmp27)
    tmp29 = triton_helpers.maximum(tmp28, tmp13)
    tmp30 = tmp16 / tmp29
    tmp31 = tmp15 - tmp30
    tmp32 = tmp31 * tmp31
    tmp33 = tl.broadcast_to(tmp32, [RBLOCK])
    tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0))
    tmp39 = tmp37 + tmp38
    tmp41 = tmp39 + tmp40
    tmp43 = tmp41 + tmp42
    tmp44 = tmp36 / tmp43
    tmp45 = libdevice.isnan(tmp44).to(tl.int1)
    tmp46 = 0.0
    tmp47 = tmp44 == tmp46
    tmp48 = tl_math.log(tmp44)
    tmp49 = tmp44 * tmp48
    tmp50 = tl.where(tmp47, tmp46, tmp49)
    tmp51 = float('nan')
    tmp52 = tl.where(tmp45, tmp51, tmp50)
    tmp54 = tmp44 * tmp53
    tmp55 = tmp52 - tmp54
    tmp56 = tl.broadcast_to(tmp55, [RBLOCK])
    tmp58 = triton_helpers.promote_to_tensor(tl.sum(tmp56, 0))
    tmp59 = 16.0
    tmp60 = tmp58 * tmp59
    tmp61 = 0.25
    tmp62 = tmp60 * tmp61
    tmp63 = 1.0
    tmp64 = tmp62 * tmp63
    tmp65 = tmp35 + tmp64
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp65, None)


def call(args):
    arg0_1, arg1_1, arg2_1, arg3_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg3_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg3_1
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(64)](arg2_1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg2_1
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf4
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf1
        del buf1
        triton_per_fused__log_softmax__softmax_add_div_mse_loss_mul_sub_sum_xlogy_3[grid(1)](buf7, arg0_1, arg1_1, buf2, buf5, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del buf2
        del buf5
    return buf7,


class KDTH(nn.Module):
    """KD with a Teacher Head auxiliary loss"""

    def __init__(self, T=4):
        super(KDTH, self).__init__()
        self.T = T

    def forward(self, y_s, y_t):
        y_s_th = y_s[1]
        y_s = y_s[0]
        p_t = F.softmax(y_t / self.T, dim=1)
        p_s_th = F.log_softmax(y_s_th / self.T, dim=1)
        loss_th = F.kl_div(p_s_th, p_t, size_average=False) * self.T ** 2 / y_s.shape[0]
        return loss_th


class KDE(nn.Module):
    """KD on embeddings - KDE"""

    def __init__(self):
        super(KDE, self).__init__()

    def forward(self, embedding_s, embedding_t):
        inputs_embed = F.normalize(embedding_s, p=2.0, dim=1)
        targets_embed = F.normalize(embedding_t, p=2.0, dim=1)
        loss_kde = nn.MSELoss(reduction='sum')(inputs_embed, targets_embed)
        return loss_kde


class KDETHNew(nn.Module):
    """Combination of KDE and TH"""

    def __init__(self, T=4, th_weight=1.0):
        super(KDETHNew, self).__init__()
        self.kde = KDE()
        self.kdth = KDTH(T=T)
        self.th_weight = th_weight

    def forward(self, input_0, input_1, input_2, input_3):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
        output = call([arg0_1, arg1_1, arg2_1, arg3_1])
        return output[0]
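A small sanity check of the eager composite loss (a sketch; CPU suffices since it exercises only the reference modules, and the deprecated size_average=False argument may emit a warning): with identical student and teacher embeddings the MSE term is exactly zero, so KDETH reduces to the KDTH term alone.

import torch

torch.manual_seed(0)
y = torch.rand(4, 4, 4, 4)
emb = torch.rand(4, 4, 4, 4)
assert torch.allclose(KDETH()(y, y, emb, emb), KDTH()(y, y))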
Alibaba-MIIL/HeadSharingKD
KDETH
false
7,696
[ "BSD-2-Clause" ]
15
8e2738bf069c7d12ec933f9b9107f267f7b6603a
https://github.com/Alibaba-MIIL/HeadSharingKD/tree/8e2738bf069c7d12ec933f9b9107f267f7b6603a
Unet_2levels
import torch
import torch.nn as nn


class Unet_2levels(nn.Module):

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.l11 = nn.Conv2d(1, 64, 3, padding=1)
        self.l12 = nn.Conv2d(64, 64, 3, padding=1)
        self.l21 = nn.Conv2d(64, 128, 3, padding=1)
        self.l22 = nn.Conv2d(128, 128, 3, padding=1)
        self.l31 = nn.Conv2d(128, 256, 3, padding=1)
        self.l32 = nn.Conv2d(256, 256, 3, padding=1)
        self.l41 = nn.Conv2d(256, 128, 3, padding=1)
        self.l42 = nn.Conv2d(128, 128, 3, padding=1)
        self.l51 = nn.Conv2d(128, 64, 3, padding=1)
        self.l52 = nn.Conv2d(64, 64, 3, padding=1)
        self.l53 = nn.Conv2d(64, 1, 1, padding=0)
        self.up1 = nn.ConvTranspose2d(256, 128, 2, 2, padding=0, output_padding=0)
        self.up2 = nn.ConvTranspose2d(128, 64, 2, 2, padding=0, output_padding=0)

    def forward(self, x):
        h11 = self.relu(self.l11(x))
        h12 = self.relu(self.l12(h11))
        h21 = self.relu(self.l21(self.maxpool(h12)))
        h22 = self.relu(self.l22(h21))
        h31 = self.relu(self.l31(self.maxpool(h22)))
        h32 = self.relu(self.l32(h31))
        h41 = self.relu(self.l41(torch.cat([h22, self.up1(h32)], dim=1)))
        h42 = self.relu(self.l42(h41))
        h51 = self.relu(self.l51(torch.cat([h12, self.up2(h42)], dim=1)))
        h52 = self.relu(self.l52(h51))
        return self.sigmoid(self.l53(h52))


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 1024 % 256
    x0 = xindex % 1024
    x2 = xindex // 262144
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 131072 * x2), tmp4, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 256, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 1024 * (-128 + x1) + 131072 * x2), tmp6, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-128 + x1), tmp6, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 128
    x0 = xindex % 4096
    x2 = xindex // 524288
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 262144 * x2), tmp6, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args
    args.clear()
    assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_15, (128,), (1,))
    assert_size_stride(primals_16, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (128,), (1,))
    assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_19, (128,), (1,))
    assert_size_stride(primals_20, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_21, (64,), (1,))
    assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_23, (64,), (1,))
    assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (1, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_27, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf16 = extern_kernels.convolution(buf15, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf17 = empty_strided_cuda((4, 256, 32, 32), (262144, 1024, 32, 1), torch.float32)
        triton_poi_fused_cat_5[grid(1048576)](buf9, buf16, primals_15, buf17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf16
        del primals_15
        buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf19 = buf18
        del buf18
        triton_poi_fused_convolution_relu_2[grid(524288)](buf19, primals_17, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_17
        buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_2[grid(524288)](buf21, primals_19, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_19
        buf22 = extern_kernels.convolution(buf21, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf23 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
        triton_poi_fused_cat_6[grid(2097152)](buf3, buf22, primals_21, buf23, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf22
        del primals_21
        buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf25, primals_23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_23
        buf26 = extern_kernels.convolution(buf25, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf27, primals_25, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_25
        buf28 = extern_kernels.convolution(buf27, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_sigmoid_7[grid(16384)](buf29, primals_27, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_27
    return (buf29, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29)


class Unet_2levelsNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.l11 = nn.Conv2d(1, 64, 3, padding=1)
        self.l12 = nn.Conv2d(64, 64, 3, padding=1)
        self.l21 = nn.Conv2d(64, 128, 3, padding=1)
        self.l22 = nn.Conv2d(128, 128, 3, padding=1)
        self.l31 = nn.Conv2d(128, 256, 3, padding=1)
        self.l32 = nn.Conv2d(256, 256, 3, padding=1)
        self.l41 = nn.Conv2d(256, 128, 3, padding=1)
        self.l42 = nn.Conv2d(128, 128, 3, padding=1)
        self.l51 = nn.Conv2d(128, 64, 3, padding=1)
        self.l52 = nn.Conv2d(64, 64, 3, padding=1)
        self.l53 = nn.Conv2d(64, 1, 1, padding=0)
        self.up1 = nn.ConvTranspose2d(256, 128, 2, 2, padding=0, output_padding=0)
        self.up2 = nn.ConvTranspose2d(128, 64, 2, 2, padding=0, output_padding=0)

    def forward(self, input_0):
        primals_1 = self.l11.weight
        primals_2 = self.l11.bias
        primals_4 = self.l12.weight
        primals_5 = self.l12.bias
        primals_6 = self.l21.weight
        primals_7 = self.l21.bias
        primals_8 = self.l22.weight
        primals_9 = self.l22.bias
        primals_10 = self.l31.weight
        primals_11 = self.l31.bias
        primals_12 = self.l32.weight
        primals_13 = self.l32.bias
        # bias indices follow the trace order used in call(): up1 is consumed
        # before l41 in the forward pass, so each bias must pair with the
        # kernel that actually adds it (primals_15 is added to the up1
        # output in cat_5, primals_17 in the relu after the l41 conv, etc.)
        primals_14 = self.up1.weight
        primals_15 = self.up1.bias
        primals_16 = self.l41.weight
        primals_17 = self.l41.bias
        primals_18 = self.l42.weight
        primals_19 = self.l42.bias
        primals_20 = self.up2.weight
        primals_21 = self.up2.bias
        primals_22 = self.l51.weight
        primals_23 = self.l51.bias
        primals_24 = self.l52.weight
        primals_25 = self.l52.bias
        primals_26 = self.l53.weight
        primals_27 = self.l53.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
        return output[0]
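A minimal equivalence sketch for this pair (assumes a CUDA device and the bias wiring above; the modules initialize independently, so the state dict is copied across, and the tolerance is loose for float32 conv stacks):

import torch

torch.manual_seed(0)
eager = Unet_2levels().cuda()
compiled = Unet_2levelsNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 1, 64, 64, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-4)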
AbdulMuqadim2001/dvae-refiner
Unet_2levels
false
7,697
[ "MIT" ]
27
c1ff46f91b28e613a3b7b157f8fd97ddf43e6fb2
https://github.com/AbdulMuqadim2001/dvae-refiner/tree/c1ff46f91b28e613a3b7b157f8fd97ddf43e6fb2
SavageLoss
import torch
import torch.nn as nn
import torch.distributions
import torch.utils.data


class SavageLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, output):
        return 1 / (1 + output.exp()) ** 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_exp_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = tl.full([1], 1, tl.int32)
    tmp6 = tmp5 / tmp4
    tmp7 = tmp6 * tmp2
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SavageLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
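A minimal equivalence check for this pair (a sketch assuming a CUDA device; the kernel computes 1 / (1 + exp(x)) ** 2 elementwise, matching the eager module):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(SavageLoss()(x), SavageLossNew()(x), atol=1e-6)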
AlexMeinke/Provable-OOD-Detection
SavageLoss
false
7,698
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
UniformNoiseGenerator
import torch
import torch.distributions
import torch.utils.data


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class UniformNoiseGenerator(AdversarialNoiseGenerator):

    def __init__(self, min=0.0, max=1.0):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        return (self.max - self.min) * torch.rand_like(x) + self.min


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributions
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = 0.0
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2,


class AdversarialNoiseGenerator(torch.nn.Module):

    def __init__(self):
        super().__init__()
        return

    def forward(self, x):
        raise NotImplementedError()


class UniformNoiseGeneratorNew(AdversarialNoiseGenerator):

    def __init__(self, min=0.0, max=1.0):
        super().__init__()
        self.min = min
        self.max = max

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
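A quick smoke test for this pair (a sketch assuming a CUDA device; the traced kernel bakes in the default min=0.0 and max=1.0 at trace time, so only shape and range are checked):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = UniformNoiseGeneratorNew()(x)
assert y.shape == x.shape
assert 0.0 <= y.min().item() and y.max().item() <= 1.0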
AlexMeinke/Provable-OOD-Detection
UniformNoiseGenerator
false
7,699
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
Dunet_2levels
import torch
import torch.nn as nn


class Unet_2levels(nn.Module):

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.l11 = nn.Conv2d(1, 64, 3, padding=1)
        self.l12 = nn.Conv2d(64, 64, 3, padding=1)
        self.l21 = nn.Conv2d(64, 128, 3, padding=1)
        self.l22 = nn.Conv2d(128, 128, 3, padding=1)
        self.l31 = nn.Conv2d(128, 256, 3, padding=1)
        self.l32 = nn.Conv2d(256, 256, 3, padding=1)
        self.l41 = nn.Conv2d(256, 128, 3, padding=1)
        self.l42 = nn.Conv2d(128, 128, 3, padding=1)
        self.l51 = nn.Conv2d(128, 64, 3, padding=1)
        self.l52 = nn.Conv2d(64, 64, 3, padding=1)
        self.l53 = nn.Conv2d(64, 1, 1, padding=0)
        self.up1 = nn.ConvTranspose2d(256, 128, 2, 2, padding=0, output_padding=0)
        self.up2 = nn.ConvTranspose2d(128, 64, 2, 2, padding=0, output_padding=0)

    def forward(self, x):
        h11 = self.relu(self.l11(x))
        h12 = self.relu(self.l12(h11))
        h21 = self.relu(self.l21(self.maxpool(h12)))
        h22 = self.relu(self.l22(h21))
        h31 = self.relu(self.l31(self.maxpool(h22)))
        h32 = self.relu(self.l32(h31))
        h41 = self.relu(self.l41(torch.cat([h22, self.up1(h32)], dim=1)))
        h42 = self.relu(self.l42(h41))
        h51 = self.relu(self.l51(torch.cat([h12, self.up2(h42)], dim=1)))
        h52 = self.relu(self.l52(h51))
        return self.sigmoid(self.l53(h52))


class Dunet_2levels(nn.Module):

    def __init__(self):
        super().__init__()
        self.segmentator = Unet_2levels()
        self.refiner = Unet_2levels()

    def segment(self, x):
        return self.segmentator(x)

    def refine(self, x):
        return self.refiner(x)

    def forward(self, x):
        seg = self.segment(x)
        return seg, self.refine(seg)


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
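A minimal eager usage sketch for this two-stage model (CPU is fine here since it exercises only the reference module; the refiner consumes the segmentator's output):

import torch

model = Dunet_2levels()
x = torch.rand(1, 1, 64, 64)
seg, refined = model(x)
assert seg.shape == refined.shape == x.shape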
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 1024 % 128
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x2, tmp6, None)
    tl.store(out_ptr1 + x2, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 1024 % 256
    x0 = xindex % 1024
    x2 = xindex // 262144
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 131072 * x2), tmp4, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 256, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 1024 * (-128 + x1) + 131072 * x2), tmp6, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-128 + x1), tmp6, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4096 % 128
    x0 = xindex % 4096
    x2 = xindex // 524288
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 262144 * x2), tmp6, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp6, tmp11, tmp12)
    tmp14 = tl.where(tmp4, tmp5, tmp13)
    tl.store(out_ptr0 + x3, tmp14, None)


@triton.jit
def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53) = args
    args.clear()
    assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_15, (128,), (1,))
    assert_size_stride(primals_16, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (128,), (1,))
    assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_19, (128,), (1,))
    assert_size_stride(primals_20, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_21, (64,), (1,))
    assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_23, (64,), (1,))
    assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (1, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_27, (1,), (1,))
    assert_size_stride(primals_28, (64, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_29, (64,), (1,))
    assert_size_stride(primals_30, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_31, (64,), (1,))
    assert_size_stride(primals_32, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_33, (128,), (1,))
    assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_35, (128,), (1,))
    assert_size_stride(primals_36, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_37, (256,), (1,))
    assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_39, (256,), (1,))
    assert_size_stride(primals_40, (256, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_41, (128,), (1,))
    assert_size_stride(primals_42, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_43, (128,), (1,))
    assert_size_stride(primals_44, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_45, (128,), (1,))
    assert_size_stride(primals_46, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_47, (64,), (1,))
    assert_size_stride(primals_48, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_49, (64,), (1,))
    assert_size_stride(primals_50, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_51, (64,), (1,))
    assert_size_stride(primals_52, (1, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_53, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf7 = buf6
        del buf6
        triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf13 = buf12
        del buf12
        triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_11
        buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf15 = buf14
        del buf14
        triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_13
        buf16 = extern_kernels.convolution(buf15, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf17 = empty_strided_cuda((4, 256, 32, 32), (262144, 1024, 32, 1), torch.float32)
        triton_poi_fused_cat_5[grid(1048576)](buf9, buf16, primals_15, buf17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf16
        del primals_15
        buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf19 = buf18
        del buf18
        triton_poi_fused_convolution_relu_2[grid(524288)](buf19, primals_17, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_17
        buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_convolution_relu_2[grid(524288)](buf21, primals_19, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_19
        buf22 = extern_kernels.convolution(buf21, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf23 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
        triton_poi_fused_cat_6[grid(2097152)](buf3, buf22, primals_21, buf23, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_21
        buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf25 = buf24
        del buf24
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf25, primals_23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_23
        buf26 = extern_kernels.convolution(buf25, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf27 = buf26
        del buf26
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf27, primals_25, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_25
        buf28 = extern_kernels.convolution(buf27, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf29 = buf28
        del buf28
        triton_poi_fused_convolution_sigmoid_7[grid(16384)](buf29, primals_27, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_27
        buf30 = extern_kernels.convolution(buf29, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf31 = buf30
        del buf30
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf31, primals_29, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_29
        buf32 = extern_kernels.convolution(buf31, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf33 = buf32
        del buf32
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf33, primals_31, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_31
        buf34 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
        buf35 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf33, buf34, buf35, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        buf36 = extern_kernels.convolution(buf34, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf37 = buf36
        del buf36
        triton_poi_fused_convolution_relu_2[grid(524288)](buf37, primals_33, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_33
        buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf39 = buf38
        del buf38
        triton_poi_fused_convolution_relu_2[grid(524288)](buf39, primals_35, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_35
        buf40 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
        buf41 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf39, buf40, buf41, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf42 = extern_kernels.convolution(buf40, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf43 = buf42
        del buf42
        triton_poi_fused_convolution_relu_4[grid(262144)](buf43, primals_37, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_37
        buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf44, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf45 = buf44
        del buf44
        triton_poi_fused_convolution_relu_4[grid(262144)](buf45, primals_39, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_39
        buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf46, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf47 = reinterpret_tensor(buf22, (4, 256, 32, 32), (262144, 1024, 32, 1), 0)
        del buf22
        triton_poi_fused_cat_5[grid(1048576)](buf39, buf46, primals_41, buf47, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf46
        del primals_41
        buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf48, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf49 = buf48
        del buf48
        triton_poi_fused_convolution_relu_2[grid(524288)](buf49, primals_43, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_43
        buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf50, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf51 = buf50
        del buf50
        triton_poi_fused_convolution_relu_2[grid(524288)](buf51, primals_45, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_45
        buf52 = extern_kernels.convolution(buf51, primals_46, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf53 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
        triton_poi_fused_cat_6[grid(2097152)](buf33, buf52, primals_47, buf53, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf52
        del primals_47
        buf54 = extern_kernels.convolution(buf53, primals_48, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf54, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf55 = buf54
        del buf54
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf55, primals_49, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_49
        buf56 = extern_kernels.convolution(buf55, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf56, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf57 = buf56
        del buf56
        triton_poi_fused_convolution_relu_0[grid(1048576)](buf57, primals_51, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_51
        buf58 = extern_kernels.convolution(buf57, primals_52, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf58,
(4, 1, 64, 64), (4096, 4096, 64, 1)) buf59 = buf58 del buf58 triton_poi_fused_convolution_sigmoid_7[grid(16384)](buf59, primals_53, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_53 return (buf29, buf59, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf34, buf35, buf37, buf39, buf40, buf41, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59) class Unet_2levels(nn.Module): def __init__(self): super().__init__() self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) self.l11 = nn.Conv2d(1, 64, 3, padding=1) self.l12 = nn.Conv2d(64, 64, 3, padding=1) self.l21 = nn.Conv2d(64, 128, 3, padding=1) self.l22 = nn.Conv2d(128, 128, 3, padding=1) self.l31 = nn.Conv2d(128, 256, 3, padding=1) self.l32 = nn.Conv2d(256, 256, 3, padding=1) self.l41 = nn.Conv2d(256, 128, 3, padding=1) self.l42 = nn.Conv2d(128, 128, 3, padding=1) self.l51 = nn.Conv2d(128, 64, 3, padding=1) self.l52 = nn.Conv2d(64, 64, 3, padding=1) self.l53 = nn.Conv2d(64, 1, 1, padding=0) self.up1 = nn.ConvTranspose2d(256, 128, 2, 2, padding=0, output_padding=0) self.up2 = nn.ConvTranspose2d(128, 64, 2, 2, padding=0, output_padding=0) def forward(self, x): h11 = self.relu(self.l11(x)) h12 = self.relu(self.l12(h11)) h21 = self.relu(self.l21(self.maxpool(h12))) h22 = self.relu(self.l22(h21)) h31 = self.relu(self.l31(self.maxpool(h22))) h32 = self.relu(self.l32(h31)) h41 = self.relu(self.l41(torch.cat([h22, self.up1(h32)], dim=1))) h42 = self.relu(self.l42(h41)) h51 = self.relu(self.l51(torch.cat([h12, self.up2(h42)], dim=1))) h52 = self.relu(self.l52(h51)) return self.sigmoid(self.l53(h52)) class Dunet_2levelsNew(nn.Module): def __init__(self): super().__init__() self.segmentator = Unet_2levels() self.refiner = Unet_2levels() def segment(self, x): return self.segmentator(x) def refine(self, x): return self.refiner(x) def forward(self, input_0): primals_1 = self.segmentator.l11.weight primals_2 = self.segmentator.l11.bias primals_4 = self.segmentator.l12.weight primals_5 = self.segmentator.l12.bias primals_6 = self.segmentator.l21.weight primals_7 = self.segmentator.l21.bias primals_8 = self.segmentator.l22.weight primals_9 = self.segmentator.l22.bias primals_10 = self.segmentator.l31.weight primals_11 = self.segmentator.l31.bias primals_12 = self.segmentator.l32.weight primals_13 = self.segmentator.l32.bias primals_16 = self.segmentator.l41.weight primals_15 = self.segmentator.l41.bias primals_18 = self.segmentator.l42.weight primals_17 = self.segmentator.l42.bias primals_22 = self.segmentator.l51.weight primals_21 = self.segmentator.l51.bias primals_24 = self.segmentator.l52.weight primals_23 = self.segmentator.l52.bias primals_26 = self.segmentator.l53.weight primals_27 = self.segmentator.l53.bias primals_14 = self.segmentator.up1.weight primals_19 = self.segmentator.up1.bias primals_20 = self.segmentator.up2.weight primals_25 = self.segmentator.up2.bias primals_28 = self.refiner.l11.weight primals_29 = self.refiner.l11.bias primals_30 = self.refiner.l12.weight primals_31 = 
self.refiner.l12.bias primals_32 = self.refiner.l21.weight primals_33 = self.refiner.l21.bias primals_34 = self.refiner.l22.weight primals_35 = self.refiner.l22.bias primals_36 = self.refiner.l31.weight primals_37 = self.refiner.l31.bias primals_38 = self.refiner.l32.weight primals_39 = self.refiner.l32.bias primals_42 = self.refiner.l41.weight primals_41 = self.refiner.l41.bias primals_44 = self.refiner.l42.weight primals_43 = self.refiner.l42.bias primals_48 = self.refiner.l51.weight primals_47 = self.refiner.l51.bias primals_50 = self.refiner.l52.weight primals_49 = self.refiner.l52.bias primals_52 = self.refiner.l53.weight primals_53 = self.refiner.l53.bias primals_40 = self.refiner.up1.weight primals_45 = self.refiner.up1.bias primals_46 = self.refiner.up2.weight primals_51 = self.refiner.up2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53]) return output[0], output[1]
AbdulMuqadim2001/dvae-refiner
Dunet_2levels
false
7,700
[ "MIT" ]
27
c1ff46f91b28e613a3b7b157f8fd97ddf43e6fb2
https://github.com/AbdulMuqadim2001/dvae-refiner/tree/c1ff46f91b28e613a3b7b157f8fd97ddf43e6fb2
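A minimal standalone sketch (not part of the record above; channel counts and shapes are illustrative) of the skip-connection invariant that the Unet_2levels cat() calls rely on: MaxPool2d(2, 2) halves the spatial size and a ConvTranspose2d with kernel 2 and stride 2 restores it, so encoder and decoder features line up for channel concatenation.

import torch
import torch.nn as nn

x = torch.randn(1, 8, 64, 64)
down = nn.MaxPool2d(kernel_size=2, stride=2)(x)    # (1, 8, 32, 32)
up = nn.ConvTranspose2d(8, 8, 2, 2)(down)          # back to (1, 8, 64, 64)
skip = torch.cat([x, up], dim=1)                   # channel concat, as in l41/l51
assert skip.shape == (1, 16, 64, 64)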
GlobalAttention
import torch import torch.nn as nn import torch.cuda def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. """ batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, nn.Linear): pass class GlobalAttention(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. .. mermaid:: graph BT A[Query] subgraph RNN C[H 1] D[H 2] E[H N] end F[Attn] G[Output] A --> F C --> F D --> F E --> F C -.-> G D -.-> G E -.-> G F --> G All models compute the output as :math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. Then then apply a projection layer to [q, c]. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)` Args: dim (int): dimensionality of query and key coverage (bool): use coverage term attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, dim, coverage=False, attn_type='dot'): super(GlobalAttention, self).__init__() self.dim = dim self.attn_type = attn_type assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' 
if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) self.sm = nn.Softmax() self.tanh = nn.Tanh() if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def score(self, h_t, h_s): """ Args: h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]` h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]` Returns: :obj:`FloatTensor`: raw attention scores (unnormalized) for each src index `[batch x tgt_len x src_len]` """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input, context, context_lengths=None, coverage=None): """ Args: input (`FloatTensor`): query vectors `[batch x tgt_len x dim]` context (`FloatTensor`): source vectors `[batch x src_len x dim]` context_lengths (`LongTensor`): the source context lengths `[batch]` coverage (`FloatTensor`): None (not supported yet) Returns: (`FloatTensor`, `FloatTensor`): * Computed vector `[tgt_len x batch x dim]` * Attention distributions for each query `[tgt_len x batch x src_len]` """ if input.dim() == 2: one_step = True input = input.unsqueeze(1) else: one_step = False batch, sourceL, dim = context.size() batch_, targetL, dim_ = input.size() aeq(batch, batch_) aeq(dim, dim_) aeq(self.dim, dim) if coverage is not None: batch_, sourceL_ = coverage.size() aeq(batch, batch_) aeq(sourceL, sourceL_) if coverage is not None: cover = coverage.view(-1).unsqueeze(1) context += self.linear_cover(cover).view_as(context) context = self.tanh(context) align = self.score(input, context) if context_lengths is not None: mask = sequence_mask(context_lengths) mask = mask.unsqueeze(1) align.data.masked_fill_(~mask, -float('inf')) align_vectors = self.sm(align.view(batch * targetL, sourceL)) align_vectors = align_vectors.view(batch, targetL, sourceL) c = torch.bmm(align_vectors, context) concat_c = torch.cat([c, input], 2).view(batch * targetL, dim * 2) attn_h = self.linear_out(concat_c).view(batch, targetL, dim) if self.attn_type in ['general', 'dot']: attn_h = self.tanh(attn_h) if one_step: attn_h = attn_h.squeeze(1) align_vectors = align_vectors.squeeze(1) batch_, dim_ = attn_h.size() aeq(batch, batch_) aeq(dim, dim_) batch_, sourceL_ = align_vectors.size() aeq(batch, batch_) aeq(sourceL, sourceL_) else: attn_h = attn_h.transpose(0, 1).contiguous() align_vectors = align_vectors.transpose(0, 1).contiguous() targetL_, batch_, dim_ = attn_h.size() aeq(targetL, targetL_) aeq(batch, batch_) aeq(dim, dim_) targetL_, batch_, sourceL_ = align_vectors.size() aeq(targetL, targetL_) aeq(batch,
batch_) aeq(sourceL, sourceL_) return attn_h, align_vectors def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x3, tmp1, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5) del primals_3 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5 def aeq(*args): """ Assert all arguments have the same value """ arguments = (arg for arg in args) first = next(arguments) assert all(arg == first for arg in arguments ), 'Not all arguments have the same value: ' + str(args) def sequence_mask(lengths, max_len=None): """ Creates a boolean mask from sequence lengths. 
""" batch_size = lengths.numel() max_len = max_len or lengths.max() return torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1).lt( lengths.unsqueeze(1)) class Bottle(nn.Module): def forward(self, input): if len(input.size()) <= 2: return super(Bottle, self).forward(input) size = input.size()[:2] out = super(Bottle, self).forward(input.view(size[0] * size[1], -1)) return out.contiguous().view(size[0], size[1], -1) class BottleLinear(Bottle, nn.Linear): pass class GlobalAttentionNew(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. .. mermaid:: graph BT A[Query] subgraph RNN C[H 1] D[H 2] E[H N] end F[Attn] G[Output] A --> F C --> F D --> F E --> F C -.-> G D -.-> G E -.-> G F --> G All models compute the output as :math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. Then then apply a projection layer to [q, c]. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)` Args: dim (int): dimensionality of query and key coverage (bool): use coverage term attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, dim, coverage=False, attn_type='dot'): super(GlobalAttentionNew, self).__init__() self.dim = dim self.attn_type = attn_type assert self.attn_type in ['dot', 'general', 'mlp' ], 'Please select a valid attention type.' if self.attn_type == 'general': self.linear_in = nn.Linear(dim, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=True) self.v = BottleLinear(dim, 1, bias=False) out_bias = self.attn_type == 'mlp' self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias) self.sm = nn.Softmax() self.tanh = nn.Tanh() if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def score(self, h_t, h_s): """ Args: h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]` h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]` Returns: :obj:`FloatTensor`: raw attention scores (unnormalized) for each src index `[batch x tgt_len x src_len]` """ src_batch, src_len, src_dim = h_s.size() tgt_batch, tgt_len, tgt_dim = h_t.size() aeq(src_batch, tgt_batch) aeq(src_dim, tgt_dim) aeq(self.dim, src_dim) if self.attn_type in ['general', 'dot']: if self.attn_type == 'general': h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim) h_t_ = self.linear_in(h_t_) h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim) h_s_ = h_s.transpose(1, 2) return torch.bmm(h_t, h_s_) else: dim = self.dim wq = self.linear_query(h_t.view(-1, dim)) wq = wq.view(tgt_batch, tgt_len, 1, dim) wq = wq.expand(tgt_batch, tgt_len, src_len, dim) uh = self.linear_context(h_s.contiguous().view(-1, dim)) uh = uh.view(src_batch, 1, src_len, dim) uh = uh.expand(src_batch, tgt_len, src_len, dim) wquh = self.tanh(wq + uh) return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) def forward(self, input_0, input_1): primals_3 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
AndrewM1998/MultimodalNMT
GlobalAttention
false
7,701
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
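A hedged eager-mode sketch of the default 'dot' scoring path in the GlobalAttention record above (illustrative only; the Linear weight here is random, not the module's trained parameter):

import torch
import torch.nn as nn

q, ctx = torch.rand(4, 4, 4), torch.rand(4, 4, 4)   # [batch x len x dim], as in get_inputs()
scores = torch.bmm(q, ctx.transpose(1, 2))          # dot score: [batch x tgt_len x src_len]
align = torch.softmax(scores, dim=-1)
c = torch.bmm(align, ctx)                           # convex combination of the source states
linear_out = nn.Linear(2 * 4, 4, bias=False)        # out_bias is False for 'dot'
attn_h = torch.tanh(linear_out(torch.cat([c, q], dim=2)))
assert attn_h.shape == (4, 4, 4)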
MLB
import torch from torch import nn from torch.nn import functional as F class MLB(nn.Module): def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input= 'relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0): super(MLB, self).__init__() self.input_dims = input_dims self.mm_dim = mm_dim self.output_dim = output_dim self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_lin = dropout_pre_lin self.dropout_output = dropout_output self.linear0 = nn.Linear(input_dims[0], mm_dim) self.linear1 = nn.Linear(input_dims[1], mm_dim) self.linear_out = nn.Linear(mm_dim, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, x): x0 = self.linear0(x[0]) x1 = self.linear1(x[1]) if self.activ_input: x0 = getattr(F, self.activ_input)(x0) x1 = getattr(F, self.activ_input)(x1) if self.dropout_input > 0: x0 = F.dropout(x0, p=self.dropout_input, training=self.training) x1 = F.dropout(x1, p=self.dropout_input, training=self.training) z = x0 * x1 if self.normalize: z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z)) z = F.normalize(z, p=2) if self.dropout_pre_lin > 0: z = F.dropout(z, p=self.dropout_pre_lin, training=self.training) z = self.linear_out(z) if self.activ_output: z = getattr(F, self.activ_output)(z) if self.dropout_output > 0: z = F.dropout(z, p=self.dropout_output, training=self.training) return z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dims': [4, 4], 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 1200 x1 = xindex // 1200 tmp0 = tl.load(in_ptr0 + (x0 + 1216 * x1), xmask) tmp3 = tl.load(in_ptr1 + (x0 + 1216 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 * tmp4 tl.store(out_ptr0 + (x0 + 1216 * x1), tmp5, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1200, 4), (4, 1)) assert_size_stride(primals_3, (1200,), (1,)) assert_size_stride(primals_4, (1200, 4), (4, 1)) assert_size_stride(primals_5, (1200,), (1,)) assert_size_stride(primals_6, (4, 1200), (1200, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1200), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 64), reinterpret_tensor(primals_4, (4, 1200), (1, 4 ), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 1200), (4864, 1216, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_0[grid(19200)](buf0, buf1, buf2, 19200, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0 ), reinterpret_tensor(primals_6, (1200, 4), (1, 1200), 0), out=buf3 ) buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(64)](buf4, primals_7, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_1, (16, 4), (4, 1), 64 ), buf1, reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0 ), buf5, primals_6 class MLBNew(nn.Module): def __init__(self, 
input_dims, output_dim, mm_dim=1200, activ_input= 'relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0): super(MLBNew, self).__init__() self.input_dims = input_dims self.mm_dim = mm_dim self.output_dim = output_dim self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_lin = dropout_pre_lin self.dropout_output = dropout_output self.linear0 = nn.Linear(input_dims[0], mm_dim) self.linear1 = nn.Linear(input_dims[1], mm_dim) self.linear_out = nn.Linear(mm_dim, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, input_0): primals_2 = self.linear0.weight primals_3 = self.linear0.bias primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear_out.weight primals_7 = self.linear_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AndresPMD/GCN_classification
MLB
false
7,702
[ "MIT" ]
39
b005c4256d68f1f90a7f73e7fdb3d066448de28c
https://github.com/AndresPMD/GCN_classification/tree/b005c4256d68f1f90a7f73e7fdb3d066448de28c
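A small functional sketch of the MLB fusion rule above, z = relu(W0 x0) * relu(W1 x1) followed by an output projection; note that in the record's forward, x[0] and x[1] are the two modality inputs sliced from the first dimension. The weights below are random stand-ins, not the module's parameters:

import torch
import torch.nn.functional as F

x0, x1 = torch.rand(16, 4), torch.rand(16, 4)
W0, W1 = torch.rand(1200, 4), torch.rand(1200, 4)   # mm_dim = 1200 as in the record
W_out = torch.rand(4, 1200)
z = F.relu(x0 @ W0.T) * F.relu(x1 @ W1.T)           # elementwise (Hadamard) low-rank bilinear fusion
y = F.relu(z @ W_out.T)                             # (16, 4)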
EntityClassifier
import torch import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): def __init__(self, indim, hs, outdim, mlp_drop): super().__init__() """ eh, et, |eh-et|, eh*et """ indim = 4 * indim self.linear1 = nn.Linear(indim, 2 * hs) self.linear2 = nn.Linear(2 * hs, outdim) self.drop = nn.Dropout(mlp_drop) def forward(self, head_rep, tail_rep): """ :param head_rep: (?, hs) :param tail_rep: (?, hs) :param doc_rep: (1, hs) :return: logits (?, outdim) """ mlp_input = [head_rep, tail_rep, torch.abs(head_rep - tail_rep), head_rep * tail_rep] mlp_input = torch.cat(mlp_input, -1) h = self.drop(F.relu(self.linear1(mlp_input))) return self.linear2(h) class EntityClassifier(nn.Module): def __init__(self, hs, num_class, mlp_drop): super().__init__() indim = 2 * hs self.classifier = MLP(indim, hs, num_class, mlp_drop) def forward(self, global_head, global_tail, local_head, local_tail, path2ins): ins2path = torch.transpose(path2ins, 0, 1) global_head = torch.matmul(path2ins, global_head) global_tail = torch.matmul(path2ins, global_tail) head_rep, tail_rep = [], [] head_rep.append(local_head) tail_rep.append(local_tail) head_rep.append(global_head) tail_rep.append(global_tail) head_rep = torch.cat(head_rep, dim=-1) tail_rep = torch.cat(tail_rep, dim=-1) pred = self.classifier(head_rep, tail_rep) pred = pred.squeeze(-1) pred = torch.sigmoid(pred) pred = pred.unsqueeze(0) ins2path = ins2path.unsqueeze(-1) pred = torch.max(pred * ins2path, dim=1)[0] return pred def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hs': 4, 'num_class': 4, 'mlp_drop': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (4 * x1 + x0), tmp7, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10, eviction_policy= 'evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 16, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = -8 + x0 tmp21 = tmp19 < tmp5 tmp22 = tmp21 & tmp18 tmp23 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp22, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp19 >= tmp5 tmp26 = tmp24 & tmp18 tmp27 = tl.load(in_ptr3 + (4 * x1 + (-4 + (-8 + x0))), tmp26, eviction_policy='evict_last', other=0.0) tmp28 = tl.where(tmp21, tmp23, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp18, tmp28, tmp29) tmp31 = tmp0 >= tmp16 tmp32 = tl.full([1], 24, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = -16 + x0 tmp37 = tmp35 < tmp5 tmp38 = tmp37 & tmp34 tmp39 = tl.load(in_ptr0 + (4 * x1 + (-16 + x0)), tmp38, eviction_policy ='evict_last', other=0.0) tmp40 = tmp35 >= tmp5 tmp42 = tmp40 & tmp34 tmp43 = tl.load(in_ptr1 + (4 * x1 + (-4 + (-16 + x0))), tmp42, eviction_policy='evict_last', other=0.0) tmp44 = tl.where(tmp37, tmp39, tmp43) tmp45 = tl.load(in_ptr2 + (4 * x1 + (-16 + x0)), tmp38, eviction_policy ='evict_last', other=0.0) tmp46 = tl.load(in_ptr3 + (4 * x1 + (-4 + (-16 + x0))), tmp42, eviction_policy='evict_last', other=0.0) tmp47 = tl.where(tmp37, tmp45, tmp46) tmp48 = tmp44 - tmp47 tmp49 = tl_math.abs(tmp48) tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp34, tmp49, tmp50) tmp52 = tmp0 >= tmp32 tl.full([1], 32, tl.int64) tmp55 = -24 + x0 tmp57 = tmp55 < tmp5 tmp58 = tmp57 & tmp52 tmp59 = tl.load(in_ptr0 + (4 * x1 + (-24 + x0)), tmp58, eviction_policy ='evict_last', other=0.0) tmp60 = tmp55 >= tmp5 tmp62 = tmp60 & tmp52 tmp63 = tl.load(in_ptr1 + (4 * x1 + (-4 + (-24 + x0))), tmp62, eviction_policy='evict_last', other=0.0) tmp64 = tl.where(tmp57, tmp59, tmp63) tmp65 = tl.load(in_ptr2 + (4 * x1 + (-24 + x0)), tmp58, eviction_policy ='evict_last', other=0.0) tmp66 = tl.load(in_ptr3 + (4 * x1 + (-4 + (-24 + x0))), tmp62, eviction_policy='evict_last', other=0.0) tmp67 = tl.where(tmp57, tmp65, tmp66) tmp68 = tmp64 * tmp67 tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype) tmp70 = tl.where(tmp52, tmp68, tmp69) tmp71 = tl.where(tmp34, tmp51, tmp70) tmp72 = tl.where(tmp18, tmp30, tmp71) tmp73 = 
tl.where(tmp4, tmp14, tmp72) tl.store(out_ptr0 + x2, tmp73, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp10 = tl.sigmoid(tmp9) tmp12 = tmp10 * tmp11 tmp13 = triton_helpers.maximum(tmp8, tmp12) tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = triton_helpers.maximum(tmp13, tmp17) tl.store(out_ptr0 + x5, tmp18, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (8, 32), (32, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0 ), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0 ), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(2048)](primals_4, buf0, primals_5, buf1, buf2, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 8), (1, 32), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf3 buf7 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(512)](buf4, primals_7, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_max_2[grid(256)](buf5, primals_1, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, reinterpret_tensor(buf2, (64, 32), (32, 1), 0 ), reinterpret_tensor(buf4, (64, 8), (8, 1), 0), buf5, primals_8, buf7 class MLP(nn.Module): def __init__(self, indim, hs, outdim, mlp_drop): super().__init__() """ eh, et, |eh-et|, eh*et """ indim = 4 * indim self.linear1 = nn.Linear(indim, 2 * hs) self.linear2 = nn.Linear(2 * hs, outdim) self.drop = nn.Dropout(mlp_drop) def forward(self, head_rep, tail_rep): """ :param head_rep: (?, hs) :param tail_rep: (?, hs) :param doc_rep: (1, hs) :return: logits (?, outdim) """ mlp_input = [head_rep, tail_rep, torch.abs(head_rep - tail_rep), head_rep * tail_rep] mlp_input = torch.cat(mlp_input, -1) h = self.drop(F.relu(self.linear1(mlp_input))) return self.linear2(h) class EntityClassifierNew(nn.Module): def __init__(self, hs, num_class, mlp_drop): super().__init__() indim = 2 * hs self.classifier = MLP(indim, hs, num_class, mlp_drop) def forward(self, input_0, input_1, input_2, input_3, input_4): primals_6 = self.classifier.linear1.weight primals_7 = self.classifier.linear1.bias primals_8 = self.classifier.linear2.weight primals_9 = self.classifier.linear2.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_4 = input_3 primals_5 = input_4 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
AndrewZhe/Three-Sentences-Are-All-You-Need
EntityClassifier
false
7,703
[ "MIT" ]
21
afad6f9e700c9a95e03ef200718ebee8e18ca016
https://github.com/AndrewZhe/Three-Sentences-Are-All-You-Need/tree/afad6f9e700c9a95e03ef200718ebee8e18ca016
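A quick sketch of the pair feature the EntityClassifier's MLP consumes, [h, t, |h - t|, h * t], which quadruples the input width (hence indim = 4 * indim in MLP.__init__); tensor values are arbitrary:

import torch

h, t = torch.rand(5, 8), torch.rand(5, 8)
feat = torch.cat([h, t, torch.abs(h - t), h * t], dim=-1)
assert feat.shape == (5, 4 * 8)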
HingeLoss
import torch import torch.nn as nn import torch.nn.functional as F import torch.distributions import torch.utils.data class HingeLoss(nn.Module): def __init__(self, margin=1.0): super().__init__() self.margin = margin def forward(self, output): return F.relu(self.margin - output) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.distributions import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HingeLossNew(nn.Module): def __init__(self, margin=1.0): super().__init__() self.margin = margin def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlexMeinke/Provable-OOD-Detection
HingeLoss
false
7,704
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
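The hinge in the record above is just F.relu(margin - output): zero once the score clears the margin, linear below it. A tiny worked check with hand-picked values (not from the record):

import torch
import torch.nn.functional as F

out = torch.tensor([-0.5, 0.3, 2.0])
loss = F.relu(1.0 - out)                            # margin = 1.0
assert torch.allclose(loss, torch.tensor([1.5, 0.7, 0.0]))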
OELoss
import torch import torch.nn as nn import torch.distributions import torch.utils.data class OELoss(nn.Module): def __init__(self): super().__init__() def forward(self, logits): return -torch.log_softmax(logits, 1).mean(1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.distributions import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mean_neg_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = tmp2 - tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp5 - tmp11 tmp16 = tmp14 + tmp15 tmp17 = tmp8 - tmp11 tmp18 = tmp16 + tmp17 tmp19 = 4.0 tmp20 = tmp18 / tmp19 tmp21 = -tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_mean_neg_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 return buf1, class OELossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlexMeinke/Provable-OOD-Detection
OELoss
false
7,705
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
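The outlier-exposure term above is the mean negative log-softmax over the class dimension; it reaches its minimum, log C for C classes, when the logits are uniform. A standalone sanity check, not part of the record:

import torch

u = torch.zeros(2, 5)                               # uniform logits over 5 classes
loss = -torch.log_softmax(u, 1).mean(1)
assert torch.allclose(loss, torch.log(torch.tensor(5.0)).expand(2))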
SourceContextGate
import torch import torch.nn as nn import torch.cuda class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class SourceContextGate(nn.Module): """Apply the context gate only to the source context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(SourceContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh(target + z * source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8 ), 0), out=buf4) del primals_8 buf5 = buf4 del buf4 triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf5, primals_9, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 return buf5, primals_3, buf0, buf1, buf2, buf3, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class SourceContextGateNew(nn.Module): """Apply the context gate only to the source context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(SourceContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_1 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_2 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
AndrewM1998/MultimodalNMT
SourceContextGate
false
7,706
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
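The source-only gate above computes tanh(target + z * source): the sigmoid gate z scales just the projected attention (source) state while the target projection passes through ungated. A minimal numeric sketch with random stand-ins for the projections:

import torch

z = torch.sigmoid(torch.rand(4, 4))                 # gate values in (0, 1)
proj_source, proj_target = torch.rand(4, 4), torch.rand(4, 4)
out = torch.tanh(proj_target + z * proj_source)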
AttentionBlock
import torch import torch.nn as nn class AttentionBlock(nn.Module): def __init__(self, in_features, middle_features, out_features): super().__init__() self.in_features = in_features self.middle_features = middle_features self.out_features = out_features self.W = nn.Linear(in_features, middle_features) self.V = nn.Linear(middle_features, out_features) def forward(self, features): att = torch.tanh(self.W(features)) score = self.V(att) attention_weights = torch.softmax(score, dim=1) context_vector = attention_weights * features context_vector = torch.sum(context_vector, dim=1) return context_vector def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'middle_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp9 = tmp7 * tmp8 tmp10 = tmp1 / tmp6 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tmp3 / tmp6 tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp5 / tmp6 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) 
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mul_sum_2[grid(64)](buf3, primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 return buf4, primals_3, buf1, buf2, primals_4 class AttentionBlockNew(nn.Module): def __init__(self, in_features, middle_features, out_features): super().__init__() self.in_features = in_features self.middle_features = middle_features self.out_features = out_features self.W = nn.Linear(in_features, middle_features) self.V = nn.Linear(middle_features, out_features) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_4 = self.V.weight primals_5 = self.V.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Anjum48/commonlitreadabilityprize
AttentionBlock
false
7,707
[ "MIT" ]
28
b310742520b847b452ced0d27f47a934e834e4de
https://github.com/Anjum48/commonlitreadabilityprize/tree/b310742520b847b452ced0d27f47a934e834e4de
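A quick parity sketch for this record (not part of the dataset row): it assumes a CUDA device and that both AttentionBlock and AttentionBlockNew from the code fields above are in scope, and it copies the eager weights into the fused module so the two outputs are directly comparable; the tolerance is illustrative.

import torch

eager = AttentionBlock(in_features=4, middle_features=4, out_features=4).cuda()
fused = AttentionBlockNew(in_features=4, middle_features=4, out_features=4).cuda()
fused.load_state_dict(eager.state_dict())  # both expose W and V, so the state dicts line up
x = torch.rand(4, 4, 4, 4, device='cuda')  # same shape as get_inputs()
assert torch.allclose(eager(x), fused(x), atol=1e-5)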
Scale_By_ParamI
import torch import torch.nn as nn import torch.distributions import torch.utils.data class Scale_By_ParamI(nn.Module): def __init__(self): super().__init__() self.scalar = nn.Parameter(torch.ones(1)) def forward(self, x): out = x * self.scalar return out def ibp_forward(self, l, u): if self.scalar >= 0: l_ = l * self.scalar u_ = u * self.scalar else: u_ = l * self.scalar l_ = u * self.scalar return l_, u_ def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.distributions import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class Scale_By_ParamINew(nn.Module): def __init__(self): super().__init__() self.scalar = nn.Parameter(torch.ones(1)) def ibp_forward(self, l, u): if self.scalar >= 0: l_ = l * self.scalar u_ = u * self.scalar else: u_ = l * self.scalar l_ = u * self.scalar return l_, u_ def forward(self, input_0): primals_1 = self.scalar primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
AlexMeinke/Provable-OOD-Detection
Scale_By_ParamI
false
7,708
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
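The module above is a single learned scalar applied elementwise, so its behaviour is easy to pin down. A short sketch, assuming CUDA and the classes from this record in scope:

import torch

m = Scale_By_ParamINew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m(x), x * m.scalar)  # the fused kernel is a broadcasted multiply
# ibp_forward propagates interval bounds; with the initial scalar (1.0 >= 0) ordering is kept
lo, hi = m.ibp_forward(x - 0.1, x + 0.1)
assert torch.all(lo <= hi)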
Mish
import torch import torch.nn as nn import torch.nn.functional as F class Mish(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x * torch.tanh(F.softplus(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class MishNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Archaic-Atom/JackFramework
Mish
false
7,709
[ "MIT" ]
13
e847d0bafe335ee33caf174676d12ad3c28011a6
https://github.com/Archaic-Atom/JackFramework/tree/e847d0bafe335ee33caf174676d12ad3c28011a6
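Mish is x * tanh(softplus(x)), and the Triton kernel mirrors F.softplus's default threshold of 20 before the tanh. Since PyTorch ships the same activation as F.mish, a one-line cross-check is possible (a sketch assuming CUDA and MishNew in scope):

import torch
import torch.nn.functional as F

m = MishNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m(x), F.mish(x), atol=1e-6)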
TargetContextGate
import torch import torch.nn as nn import torch.cuda class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGate(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh(z * target + source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_out_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp6 = tmp4 + tmp5 tmp7 = tmp3 + tmp6 tmp8 = libdevice.tanh(tmp7) tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = buf2 del buf2 triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf5, buf1, buf4, primals_7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 return buf5, primals_3, buf0, buf1, buf3, buf4, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class TargetContextGateNew(nn.Module): """Apply the context gate only to the target context""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(TargetContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_1 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_2 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
AndrewM1998/MultimodalNMT
TargetContextGate
false
7,710
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
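The eager module gates only the target-side context: out = tanh(z * target + source). A CPU sketch of that identity built from the eager classes above (sizes are illustrative; there is no dropout, so the check is deterministic):

import torch

gate = TargetContextGate(4, 4, 4, 4)
prev_emb, dec_state, attn_state = (torch.rand(4, 4) for _ in range(3))
z, source, target = gate.context_gate(prev_emb, dec_state, attn_state)
assert torch.allclose(gate(prev_emb, dec_state, attn_state),
                      torch.tanh(z * target + source))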
MFB
import torch from torch import nn from torch.nn import functional as F class MFB(nn.Module): def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2, activ_input='relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_norm=0.0, dropout_output=0.0): super(MFB, self).__init__() self.input_dims = input_dims self.mm_dim = mm_dim self.factor = factor self.output_dim = output_dim self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_norm = dropout_pre_norm self.dropout_output = dropout_output self.linear0 = nn.Linear(input_dims[0], mm_dim * factor) self.linear1 = nn.Linear(input_dims[1], mm_dim * factor) self.linear_out = nn.Linear(mm_dim, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, x): x0 = self.linear0(x[0]) x1 = self.linear1(x[1]) if self.activ_input: x0 = getattr(F, self.activ_input)(x0) x1 = getattr(F, self.activ_input)(x1) if self.dropout_input > 0: x0 = F.dropout(x0, p=self.dropout_input, training=self.training) x1 = F.dropout(x1, p=self.dropout_input, training=self.training) z = x0 * x1 if self.dropout_pre_norm > 0: z = F.dropout(z, p=self.dropout_pre_norm, training=self.training) z = z.view(z.size(0), self.mm_dim, self.factor) z = z.sum(2) if self.normalize: z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z)) z = F.normalize(z, p=2) z = self.linear_out(z) if self.activ_output: z = getattr(F, self.activ_output)(z) if self.dropout_output > 0: z = F.dropout(z, p=self.dropout_output, training=self.training) return z def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dims': [4, 4], 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 1200 x1 = xindex // 1200 tmp0 = tl.load(in_ptr0 + 2 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 2 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 2 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 2 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 * tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = tmp7 * tmp9 tmp11 = tmp5 + tmp10 tl.store(out_ptr0 + (x0 + 1216 * x1), tmp11, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2400, 4), (4, 1)) assert_size_stride(primals_3, (2400,), (1,)) assert_size_stride(primals_4, (2400, 4), (4, 1)) assert_size_stride(primals_5, (2400,), (1,)) assert_size_stride(primals_6, (4, 1200), (1200, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 16), reinterpret_tensor(primals_4, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 1200), (1216, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(4800)](buf0, buf1, buf2, 4800, XBLOCK= 256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_6, (1200, 4), (1, 1200), 0), out=buf3) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf4, primals_7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 return buf4, reinterpret_tensor(primals_1, (4, 4), 
(4, 1), 0 ), buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16 ), buf1, buf2, buf5, primals_6 class MFBNew(nn.Module): def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2, activ_input='relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_norm=0.0, dropout_output=0.0): super(MFBNew, self).__init__() self.input_dims = input_dims self.mm_dim = mm_dim self.factor = factor self.output_dim = output_dim self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_norm = dropout_pre_norm self.dropout_output = dropout_output self.linear0 = nn.Linear(input_dims[0], mm_dim * factor) self.linear1 = nn.Linear(input_dims[1], mm_dim * factor) self.linear_out = nn.Linear(mm_dim, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, input_0): primals_2 = self.linear0.weight primals_3 = self.linear0.bias primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear_out.weight primals_7 = self.linear_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AndresPMD/GCN_classification
MFB
false
7,711
[ "MIT" ]
39
b005c4256d68f1f90a7f73e7fdb3d066448de28c
https://github.com/AndresPMD/GCN_classification/tree/b005c4256d68f1f90a7f73e7fdb3d066448de28c
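MFB projects both modalities to mm_dim * factor, multiplies elementwise, and sum-pools every group of `factor` entries; the fused kernel folds the two ReLUs and the pairwise sum into one pass. A parity sketch, assuming CUDA and the default hyper-parameters (all dropouts are 0, so no eval() is needed):

import torch

eager = MFB(input_dims=[4, 4], output_dim=4).cuda()
fused = MFBNew(input_dims=[4, 4], output_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')  # x[0] and x[1] are the two modality inputs
assert torch.allclose(eager(x), fused(x), atol=1e-4)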
Mlp
import torch import torch.nn as nn class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 class MlpNew(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Arnav0400/ViT-Slim
Mlp
false
7,712
[ "MIT" ]
14
78edd4fecbb8cd4043e9878148576b1c327c74f9
https://github.com/Arnav0400/ViT-Slim/tree/78edd4fecbb8cd4043e9878148576b1c327c74f9
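The fused path keeps both linear layers as extern addmm calls and only fuses the exact (erf-based) GELU, which is nn.GELU's default, so the compiled module should match the eager one up to rounding. A sketch assuming CUDA:

import torch

eager = Mlp(in_features=4).cuda()
fused = MlpNew(in_features=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)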
QGOODLoss
import math import torch import torch.nn as nn import torch.distributions import torch.utils.data class QGOODLoss(nn.Module): def __init__(self, quantile=0.8): super().__init__() self.quantile = quantile def forward(self, ub_log_conf): batch_size_out = ub_log_conf.shape[0] l = math.floor(batch_size_out * self.quantile) h = batch_size_out - l above_quantile_indices = ub_log_conf.topk(h, largest=True)[1] return (ub_log_conf[above_quantile_indices] ** 2 / 2).log1p() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.distributions import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_index_log1p_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 64 * tmp4), None) tmp7 = tmp6 * tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = libdevice.log1p(tmp9) tl.store(out_ptr0 + x2, tmp10, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.topk.default(arg0_1, 1) buf2 = buf0[1] del buf0 buf3 = empty_strided_cuda((4, 4, 4, 1, 4, 4, 4), (1024, 256, 64, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_index_log1p_pow_0[grid(4096)](buf2, arg0_1, buf3, 4096, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf2 return buf3, class QGOODLossNew(nn.Module): def __init__(self, quantile=0.8): super().__init__() self.quantile = quantile def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlexMeinke/Provable-OOD-Detection
QGOODLoss
false
7,713
[ "MIT" ]
21
9a132aec994ff718c96b81885736ab866df60d87
https://github.com/AlexMeinke/Provable-OOD-Detection/tree/9a132aec994ff718c96b81885736ab866df60d87
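With the default quantile of 0.8 and a leading dimension of 4, h = 4 - floor(4 * 0.8) = 1, so the loss keeps only the top entry per last-dim slice and maps it through log1p(x**2 / 2). A parity sketch, assuming CUDA and no exact ties in topk (ties could make the selected indices differ):

import torch

eager = QGOODLoss()
fused = QGOODLossNew()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)
assert torch.all(fused(x) >= 0)  # log1p of a nonnegative argument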
MultinomialKLDivergenceLoss
import torch from torch import nn class MultinomialKLDivergenceLoss(nn.Module): def __init__(self): super().__init__() def forward(self, p_proba, q_proba): loss = q_proba * (torch.log(q_proba) - torch.log(p_proba)) loss = torch.sum(loss) return loss / (p_proba.size(1) * p_proba.size(0)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_log_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp3 = tl_math.log(tmp2) tmp4 = tmp1 - tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 0.0625 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_log_mul_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MultinomialKLDivergenceLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AuCson/SEDST
MultinomialKLDivergenceLoss
false
7,714
[ "MIT" ]
23
1c1691e2abc50eb2120ed49c874090f6c4f741d3
https://github.com/AuCson/SEDST/tree/1c1691e2abc50eb2120ed49c874090f6c4f741d3
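The eager loss is KL(q || p) = sum(q * (log q - log p)) normalized by the product of the first two dimensions; F.kl_div with input = log p and a 'sum' reduction computes the same quantity and gives an independent reference. A CPU sketch with strictly positive inputs so the logs stay finite:

import torch
import torch.nn.functional as F

loss_fn = MultinomialKLDivergenceLoss()
p = torch.rand(4, 4, 4, 4) + 1e-3
q = torch.rand(4, 4, 4, 4) + 1e-3
ref = F.kl_div(p.log(), q, reduction='sum') / (p.size(1) * p.size(0))
assert torch.allclose(loss_fn(p, q), ref, atol=1e-5)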
GCN
from torch.nn import Module import math import torch import torch.nn.functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn as nn class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.mm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import math from torch.nn.parameter import Parameter from torch.nn.modules.module import Module import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.mm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6]) return output[0]
Anou9531/GUA
GCN
false
7,715
[ "MIT" ]
20
354acceb69656e76fb4ee296c66ae42c18cd939f
https://github.com/Anou9531/GUA/tree/354acceb69656e76fb4ee296c66ae42c18cd939f
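The eager GCN ends in a row-wise log_softmax, so every output row exponentiates and sums to one whatever the adjacency is. A small CPU sketch of that invariant (eval() disables the inter-layer dropout; the identity adjacency reduces each graph convolution to a per-node linear map):

import torch

model = GCN(nfeat=4, nhid=4, nclass=4, dropout=0.5).eval()
x, adj = torch.rand(4, 4), torch.eye(4)
out = model(x, adj)
assert torch.allclose(out.exp().sum(dim=1), torch.ones(4), atol=1e-5)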
MSELoss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss."""
    return F.mse_loss(pred, target, reduction='none')


class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        loss = self.loss_weight * mse_loss(pred, target, weight, reduction=
            self.reduction, avg_factor=avg_factor)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import functools
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()
    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])
    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
        None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss."""
    return F.mse_loss(pred, target, reduction='none')


class MSELossNew(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Andrew-Zhu/DyFPN
MSELoss
false
7,716
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
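With weight=None and avg_factor=None the module reduces to loss_weight * mean((pred - target) ** 2). A CPU sketch of that reduction, using a non-default loss_weight to exercise the scaling:

import torch

loss_fn = MSELoss(reduction='mean', loss_weight=2.0)
pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose(loss_fn(pred, target), 2.0 * ((pred - target) ** 2).mean())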
BothContextGate
import torch import torch.nn as nn import torch.cuda class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class BothContextGate(nn.Module): """Apply the context gate to both contexts""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(BothContextGate, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, prev_emb, dec_state, attn_state): z, source, target = self.context_gate(prev_emb, dec_state, attn_state) return self.tanh((1.0 - z) * target + z * source) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tmp3 * tmp4 tmp7 = tmp1 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.tanh(tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf0, 
reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_1, primals_2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf4) del primals_8 del primals_9 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_tanh_2[grid(16)](buf1, buf4, buf2, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf5, primals_3, buf0, buf1, buf2, buf3, buf4, buf5 class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target class BothContextGateNew(nn.Module): """Apply the context gate to both contexts""" def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(BothContextGateNew, self).__init__() self.context_gate = ContextGate(embeddings_size, decoder_size, attention_size, output_size) self.tanh = nn.Tanh() def forward(self, input_0, input_1, input_2): primals_4 = self.context_gate.gate.weight primals_5 = self.context_gate.gate.bias primals_1 = self.context_gate.source_proj.weight primals_7 = self.context_gate.source_proj.bias primals_8 = self.context_gate.target_proj.weight primals_9 = self.context_gate.target_proj.bias primals_2 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
AndrewM1998/MultimodalNMT
BothContextGate
false
7,717
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
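Here the sigmoid gate interpolates between the two projected contexts: out = tanh((1 - z) * target + z * source). A CPU sketch of that identity via the eager classes above:

import torch

gate = BothContextGate(4, 4, 4, 4)
prev_emb, dec_state, attn_state = (torch.rand(4, 4) for _ in range(3))
z, source, target = gate.context_gate(prev_emb, dec_state, attn_state)
assert torch.allclose(gate(prev_emb, dec_state, attn_state),
                      torch.tanh((1.0 - z) * target + z * source))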
ContextGate
import torch import torch.nn as nn import torch.cuda class ContextGate(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGate, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, prev_emb, dec_state, attn_state): input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1) z = self.sig(self.gate(input_tensor)) proj_source = self.source_proj(attn_state) proj_target = self.target_proj(torch.cat((prev_emb, dec_state), dim=1)) return z, proj_source, proj_target def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'embeddings_size': 4, 'decoder_size': 4, 'attention_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 8), (8, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(16)](buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, primals_3, reinterpret_tensor( primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_2[grid(32)](primals_1, primals_2, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_8 del primals_9 return buf2, buf3, buf5, primals_3, buf0, buf2, buf4 class ContextGateNew(nn.Module): """ Context gate is a decoder module that takes as input the previous word embedding, the current decoder state and the attention state, and produces a gate. The gate can be used to select the input from the target side context (decoder state), from the source context (attention state) or both. """ def __init__(self, embeddings_size, decoder_size, attention_size, output_size): super(ContextGateNew, self).__init__() input_size = embeddings_size + decoder_size + attention_size self.gate = nn.Linear(input_size, output_size, bias=True) self.sig = nn.Sigmoid() self.source_proj = nn.Linear(attention_size, output_size) self.target_proj = nn.Linear(embeddings_size + decoder_size, output_size) def forward(self, input_0, input_1, input_2): primals_4 = self.gate.weight primals_5 = self.gate.bias primals_1 = self.source_proj.weight primals_7 = self.source_proj.bias primals_8 = self.target_proj.weight primals_9 = self.target_proj.bias primals_2 = input_0 primals_3 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2]
AndrewM1998/MultimodalNMT
ContextGate
false
7,718
[ "MIT" ]
40
b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
https://github.com/AndrewM1998/MultimodalNMT/tree/b66d3a40ac9bc5c11ae124f51d1a9abf7cd6a04b
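A quick smoke test for the ContextGate record above — an illustrative sketch only, assuming a CUDA device and that ContextGateNew from this record is in scope; the (4, 4) input shapes follow the assert_size_stride guards in call():

import torch

gate_mod = ContextGateNew(embeddings_size=4, decoder_size=4,
                          attention_size=4, output_size=4).cuda()
# Three (4, 4) inputs: previous embedding, decoder state, attention state.
emb = torch.rand(4, 4, device='cuda')
dec = torch.rand(4, 4, device='cuda')
att = torch.rand(4, 4, device='cuda')
z, source, target = gate_mod(emb, dec, att)
print(z.shape, source.shape, target.shape)  # each torch.Size([4, 4])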
LandmarkLoss
import math
import torch
from torch import nn


def wing_loss(y_true, y_pred, N_LANDMARK, w=10.0, epsilon=2.0):
    y_pred = y_pred.reshape(-1, N_LANDMARK, 2)
    y_true = y_true.reshape(-1, N_LANDMARK, 2)
    x = y_true - y_pred
    c = w * (1.0 - math.log(1.0 + w / epsilon))
    absolute_x = torch.abs(x)
    losses = torch.where(w > absolute_x,
                         w * torch.log(1.0 + absolute_x / epsilon),
                         absolute_x - c)
    loss = torch.mean(torch.sum(losses, axis=[1, 2]), axis=0)
    return loss


class LandmarkLoss(nn.Module):

    def __init__(self, n_landmark=98):
        super(LandmarkLoss, self).__init__()
        self.n_landmark = n_landmark

    def forward(self, landmark_gt, landmark_pred):
        loss = wing_loss(landmark_gt, landmark_pred,
            N_LANDMARK=self.n_landmark)
        return loss


def get_inputs():
    return [torch.rand([4, 98, 2]), torch.rand([4, 98, 2])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0,
        in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    rnumel = 196
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 196 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 196 * x0), rmask & xmask, other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 10.0
    tmp5 = tmp3 < tmp4
    tmp6 = 0.5
    tmp7 = tmp3 * tmp6
    tmp8 = 1.0
    tmp9 = tmp7 + tmp8
    tmp10 = tl_math.log(tmp9)
    tmp11 = tmp10 * tmp4
    tmp12 = -7.91759469228055
    tmp13 = tmp3 - tmp12
    tmp14 = tl.where(tmp5, tmp11, tmp13)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(rmask & xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp18, xmask)


@triton.jit
def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 4.0
    tmp5 = tmp3 / tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 98, 2), (196, 2, 1))
    assert_size_stride(arg1_1, (4, 98, 2), (196, 2, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(4)](
            arg0_1, arg1_1, buf0, 4, 196, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_mean_1[grid(1)](buf2, buf0, 1, 4, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf0
    return buf2,


def wing_loss(y_true, y_pred, N_LANDMARK, w=10.0, epsilon=2.0):
    y_pred = y_pred.reshape(-1, N_LANDMARK, 2)
    y_true = y_true.reshape(-1, N_LANDMARK, 2)
    x = y_true - y_pred
    c = w * (1.0 - math.log(1.0 + w / epsilon))
    absolute_x = torch.abs(x)
    losses = torch.where(w > absolute_x,
                         w * torch.log(1.0 + absolute_x / epsilon),
                         absolute_x - c)
    loss = torch.mean(torch.sum(losses, axis=[1, 2]), axis=0)
    return loss


class LandmarkLossNew(nn.Module):

    def __init__(self, n_landmark=98):
        super(LandmarkLossNew, self).__init__()
        self.n_landmark = n_landmark

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AnthonyF333/FaceLandmark_PFLD_UltraLight
LandmarkLoss
false
7,719
[ "Apache-2.0" ]
38
c7c9543bd7f44ab434240eab077242f259df21f8
https://github.com/AnthonyF333/FaceLandmark_PFLD_UltraLight/tree/c7c9543bd7f44ab434240eab077242f259df21f8
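Two details of the record above are worth noting: the constant -7.91759469228055 inlined in the first kernel is c = w * (1 - ln(1 + w / epsilon)) for w=10, epsilon=2, and the fused path hard-codes the (4, 98, 2) shapes from get_inputs(). A hedged equivalence sketch, assuming both classes from this record are in scope and a CUDA device is available:

import torch

gt = torch.rand(4, 98, 2, device='cuda')
pred = torch.rand(4, 98, 2, device='cuda')
eager = LandmarkLoss()(gt, pred)      # pure-PyTorch wing loss
fused = LandmarkLossNew()(gt, pred)   # Triton-compiled variant
print(torch.allclose(eager, fused, atol=1e-5))  # expected: True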
MFH
import torch
from torch import nn
from torch.nn import functional as F


class MFH(nn.Module):

    def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2,
                 activ_input='relu', activ_output='relu', normalize=False,
                 dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0):
        super(MFH, self).__init__()
        self.input_dims = input_dims
        self.output_dim = output_dim
        self.mm_dim = mm_dim
        self.factor = factor
        self.activ_input = activ_input
        self.activ_output = activ_output
        self.normalize = normalize
        self.dropout_input = dropout_input
        self.dropout_pre_lin = dropout_pre_lin
        self.dropout_output = dropout_output
        self.linear0_0 = nn.Linear(input_dims[0], mm_dim * factor)
        self.linear1_0 = nn.Linear(input_dims[1], mm_dim * factor)
        self.linear0_1 = nn.Linear(input_dims[0], mm_dim * factor)
        self.linear1_1 = nn.Linear(input_dims[1], mm_dim * factor)
        self.linear_out = nn.Linear(mm_dim * 2, output_dim)
        self.n_params = sum(p.numel() for p in self.parameters() if
            p.requires_grad)

    def forward(self, x):
        x0 = self.linear0_0(x[0])
        x1 = self.linear1_0(x[1])
        if self.activ_input:
            x0 = getattr(F, self.activ_input)(x0)
            x1 = getattr(F, self.activ_input)(x1)
        if self.dropout_input > 0:
            x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
            x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
        z_0_skip = x0 * x1
        if self.dropout_pre_lin:
            z_0_skip = F.dropout(z_0_skip, p=self.dropout_pre_lin,
                training=self.training)
        z_0 = z_0_skip.view(z_0_skip.size(0), self.mm_dim, self.factor)
        z_0 = z_0.sum(2)
        if self.normalize:
            z_0 = torch.sqrt(F.relu(z_0)) - torch.sqrt(F.relu(-z_0))
            z_0 = F.normalize(z_0, p=2)
        x0 = self.linear0_1(x[0])
        x1 = self.linear1_1(x[1])
        if self.activ_input:
            x0 = getattr(F, self.activ_input)(x0)
            x1 = getattr(F, self.activ_input)(x1)
        if self.dropout_input > 0:
            x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
            x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
        z_1 = x0 * x1 * z_0_skip
        if self.dropout_pre_lin > 0:
            z_1 = F.dropout(z_1, p=self.dropout_pre_lin,
                training=self.training)
        z_1 = z_1.view(z_1.size(0), self.mm_dim, self.factor)
        z_1 = z_1.sum(2)
        if self.normalize:
            z_1 = torch.sqrt(F.relu(z_1)) - torch.sqrt(F.relu(-z_1))
            z_1 = F.normalize(z_1, p=2)
        cat_dim = z_0.dim() - 1
        z = torch.cat([z_0, z_1], cat_dim)
        z = self.linear_out(z)
        if self.activ_output:
            z = getattr(F, self.activ_output)(z)
        if self.dropout_output > 0:
            z = F.dropout(z, p=self.dropout_output, training=self.training)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dims': [4, 4], 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2400 x1 = xindex // 2400 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1200, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 2400 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.full([1], 0, tl.int32) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp8 = tl.load(in_ptr1 + (2 * x0 + 2400 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp10 = tmp7 * tmp9 tmp11 = tl.load(in_ptr0 + (1 + 2 * x0 + 2400 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp13 = tl.load(in_ptr1 + (1 + 2 * x0 + 2400 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp15 = tmp12 * tmp14 tmp16 = tmp10 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp4, tmp16, tmp17) tmp19 = tmp0 >= tmp3 tl.full([1], 2400, tl.int64) tmp22 = tl.load(in_ptr2 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = triton_helpers.maximum(tmp6, tmp22) tmp24 = tl.load(in_ptr3 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = triton_helpers.maximum(tmp6, tmp24) tmp26 = tmp23 * tmp25 tmp27 = tl.load(in_ptr0 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = triton_helpers.maximum(tmp6, tmp27) tmp29 = tl.load(in_ptr1 + (2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = triton_helpers.maximum(tmp6, tmp29) tmp31 = tmp28 * tmp30 tmp32 = tmp26 * tmp31 tmp33 = tl.load(in_ptr2 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = triton_helpers.maximum(tmp6, tmp33) tmp35 = tl.load(in_ptr3 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = triton_helpers.maximum(tmp6, tmp35) tmp37 = tmp34 * tmp36 tmp38 = tl.load(in_ptr0 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp39 = triton_helpers.maximum(tmp6, tmp38) tmp40 = tl.load(in_ptr1 + (1 + 2 * (-1200 + x0) + 2400 * x1), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = triton_helpers.maximum(tmp6, tmp40) tmp42 = tmp39 * tmp41 tmp43 = tmp37 * tmp42 tmp44 = tmp32 + tmp43 tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype) tmp46 = tl.where(tmp19, tmp44, tmp45) tmp47 = tl.where(tmp4, tmp18, tmp46) tl.store(out_ptr0 + x2, tmp47, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2400, 4), (4, 1)) assert_size_stride(primals_3, (2400,), (1,)) assert_size_stride(primals_4, (2400, 4), (4, 1)) assert_size_stride(primals_5, (2400,), (1,)) assert_size_stride(primals_6, (2400, 4), (4, 1)) assert_size_stride(primals_7, (2400,), (1,)) assert_size_stride(primals_8, (2400, 4), (4, 1)) assert_size_stride(primals_9, (2400,), (1,)) assert_size_stride(primals_10, (4, 2400), (2400, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 16), reinterpret_tensor(primals_4, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 0), reinterpret_tensor(primals_6, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(primals_1, (4, 4 ), (4, 1), 16), reinterpret_tensor(primals_8, (4, 2400), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_8 del primals_9 buf4 = empty_strided_cuda((4, 2400), (2400, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(9600)](buf0, buf1, buf2, buf3, buf4, 9600, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_10, (2400, 4), ( 1, 2400), 0), out=buf5) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf6, primals_11, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 return buf6, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16 ), buf1, buf2, buf3, buf4, buf7, primals_10 class MFHNew(nn.Module): def __init__(self, input_dims, output_dim, mm_dim=1200, factor=2, activ_input='relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0): super(MFHNew, self).__init__() self.input_dims = input_dims self.output_dim = output_dim self.mm_dim = mm_dim self.factor = factor self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_lin = dropout_pre_lin self.dropout_output = dropout_output self.linear0_0 = nn.Linear(input_dims[0], mm_dim * factor) self.linear1_0 = nn.Linear(input_dims[1], mm_dim * factor) self.linear0_1 = nn.Linear(input_dims[0], mm_dim * factor) self.linear1_1 = nn.Linear(input_dims[1], 
mm_dim * factor) self.linear_out = nn.Linear(mm_dim * 2, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, input_0): primals_2 = self.linear0_0.weight primals_3 = self.linear0_0.bias primals_4 = self.linear1_0.weight primals_5 = self.linear1_0.bias primals_6 = self.linear0_1.weight primals_7 = self.linear0_1.bias primals_8 = self.linear1_1.weight primals_9 = self.linear1_1.bias primals_10 = self.linear_out.weight primals_11 = self.linear_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
AndresPMD/GCN_classification
MFH
false
7,720
[ "MIT" ]
39
b005c4256d68f1f90a7f73e7fdb3d066448de28c
https://github.com/AndresPMD/GCN_classification/tree/b005c4256d68f1f90a7f73e7fdb3d066448de28c
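Illustrative usage of the MFH record above — a sketch, assuming MFHNew is in scope and a CUDA device; the compiled call() asserts a (4, 4, 4) input, of which only x[0] and x[1] are consumed as the two modalities:

import torch

mfh = MFHNew(input_dims=[4, 4], output_dim=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')  # x[0] and x[1] are the (4, 4) inputs
out = mfh(x)
print(out.shape)  # torch.Size([4, 4])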
LinearSum
import torch
from torch import nn
from torch.nn import functional as F


class LinearSum(nn.Module):

    def __init__(self, input_dims, output_dim, mm_dim=1200,
                 activ_input='relu', activ_output='relu', normalize=False,
                 dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0):
        super(LinearSum, self).__init__()
        self.input_dims = input_dims
        self.output_dim = output_dim
        self.mm_dim = mm_dim
        self.activ_input = activ_input
        self.activ_output = activ_output
        self.normalize = normalize
        self.dropout_input = dropout_input
        self.dropout_pre_lin = dropout_pre_lin
        self.dropout_output = dropout_output
        self.linear0 = nn.Linear(input_dims[0], mm_dim)
        self.linear1 = nn.Linear(input_dims[1], mm_dim)
        self.linear_out = nn.Linear(mm_dim, output_dim)
        self.n_params = sum(p.numel() for p in self.parameters() if
            p.requires_grad)

    def forward(self, x):
        x0 = self.linear0(x[0])
        x1 = self.linear1(x[1])
        if self.activ_input:
            x0 = getattr(F, self.activ_input)(x0)
            x1 = getattr(F, self.activ_input)(x1)
        if self.dropout_input > 0:
            x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
            x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
        z = x0 + x1
        if self.normalize:
            z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))
            z = F.normalize(z, p=2)
        if self.dropout_pre_lin > 0:
            z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
        z = self.linear_out(z)
        if self.activ_output:
            z = getattr(F, self.activ_output)(z)
        if self.dropout_output > 0:
            z = F.dropout(z, p=self.dropout_output, training=self.training)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dims': [4, 4], 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 1200 x1 = xindex // 1200 tmp0 = tl.load(in_ptr0 + (x0 + 1216 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x0 + 1216 * x1), xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp7 = tmp5 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp9 = tmp4 + tmp8 tmp10 = 0.0 tmp11 = tmp8 <= tmp10 tmp12 = tmp4 <= tmp10 tl.store(out_ptr0 + (x0 + 1216 * x1), tmp9, xmask) tl.store(out_ptr1 + (x0 + 1280 * x1), tmp11, xmask) tl.store(out_ptr2 + (x0 + 1280 * x1), tmp12, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1200, 4), (4, 1)) assert_size_stride(primals_3, (1200,), (1,)) assert_size_stride(primals_4, (1200, 4), (4, 1)) assert_size_stride(primals_5, (1200,), (1,)) assert_size_stride(primals_6, (4, 1200), (1200, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1200), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 1200), (1216, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 64 ), reinterpret_tensor(primals_4, (4, 1200), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 1200), (4864, 1216, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 1200), (5120, 1280, 1), torch.bool) buf7 = empty_strided_cuda((4, 4, 1200), (5120, 1280, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(19200)](buf0, primals_3, buf1, primals_5, buf2, buf6, buf7, 19200, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_3 del primals_5 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0 ), reinterpret_tensor(primals_6, (1200, 4), 
(1, 1200), 0), out=buf3 ) buf4 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(64)](buf4, primals_7, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 64 ), reinterpret_tensor(buf2, (16, 1200), (1216, 1), 0 ), buf5, primals_6, buf6, buf7 class LinearSumNew(nn.Module): def __init__(self, input_dims, output_dim, mm_dim=1200, activ_input= 'relu', activ_output='relu', normalize=False, dropout_input=0.0, dropout_pre_lin=0.0, dropout_output=0.0): super(LinearSumNew, self).__init__() self.input_dims = input_dims self.output_dim = output_dim self.mm_dim = mm_dim self.activ_input = activ_input self.activ_output = activ_output self.normalize = normalize self.dropout_input = dropout_input self.dropout_pre_lin = dropout_pre_lin self.dropout_output = dropout_output self.linear0 = nn.Linear(input_dims[0], mm_dim) self.linear1 = nn.Linear(input_dims[1], mm_dim) self.linear_out = nn.Linear(mm_dim, output_dim) self.n_params = sum(p.numel() for p in self.parameters() if p. requires_grad) def forward(self, input_0): primals_2 = self.linear0.weight primals_3 = self.linear0.bias primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear_out.weight primals_7 = self.linear_out.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AndresPMD/GCN_classification
LinearSum
false
7,721
[ "MIT" ]
39
b005c4256d68f1f90a7f73e7fdb3d066448de28c
https://github.com/AndresPMD/GCN_classification/tree/b005c4256d68f1f90a7f73e7fdb3d066448de28c
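Illustrative usage of the LinearSum record above — a sketch, assuming LinearSumNew is in scope and a CUDA device; call() asserts a (4, 4, 4, 4) input and reads x[0] and x[1] as the two modalities:

import torch

ls = LinearSumNew(input_dims=[4, 4], output_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(ls(x).shape)  # torch.Size([4, 4, 4])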
L1Loss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
            None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else
            self.reduction)
        loss_bbox = self.loss_weight * l1_loss(pred, target, weight,
            reduction=reduction, avg_factor=avg_factor)
        return loss_bbox


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = 1.0
    tmp10 = tmp8 * tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1,
            1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
            None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def l1_loss(pred, target):
    """L1 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated loss
    """
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


class L1LossNew(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(L1LossNew, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Andrew-Zhu/DyFPN
L1Loss
false
7,722
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
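The compiled L1Loss path above specialises to the default arguments (no weight, no avg_factor, mean reduction), so it should reduce to plain mean absolute error. A hedged check, assuming L1LossNew is in scope and a CUDA device is available:

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
fused = L1LossNew()(pred, target)  # kernel computes sum(|pred - target|) / 256
print(torch.allclose(fused, F.l1_loss(pred, target)))  # expected: True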
DeconvBlock
import torch
import torch.nn as nn


class DeconvBlock(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(DeconvBlock, self).__init__()
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size=3, stride=2, padding=1, output_padding=0)
        self.pad = nn.ReflectionPad2d((0, 1, 0, 1))
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.pad(out)
        out = self.nonlin(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_elu_reflection_pad2d_0(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8 % 8
    x4 = xindex // 64
    x2 = xindex // 64 % 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (48 + -1 * tl_math.abs(-6 + x0) + -7 *
        tl_math.abs(-6 + x1) + 49 * x4), xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(out_ptr0 + x5, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
        buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_elu_reflection_pad2d_0[grid(1024)](buf0,
            primals_2, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return buf1, primals_1, primals_3, buf1


class DeconvBlockNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(DeconvBlockNew, self).__init__()
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size=3, stride=2, padding=1, output_padding=0)
        self.pad = nn.ReflectionPad2d((0, 1, 0, 1))
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ArminMasoumian/GCNDepth
DeconvBlock
false
7,723
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
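The output shape of the record above follows from the transposed-convolution formula H_out = (H - 1) * stride - 2 * padding + kernel_size = 2H - 1 for k=3, s=2, p=1 (4 -> 7), which the (0, 1, 0, 1) reflection pad then rounds up to 2H (7 -> 8). A sketch, assuming DeconvBlockNew is in scope and a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
print(DeconvBlockNew(4, 4).cuda()(x).shape)  # torch.Size([4, 4, 8, 8])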
ConvBlock
import torch
import torch.nn as nn


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        out = self.pad(x)
        out = self.conv(out)
        return out


class ConvBlock(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.nonlin(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
        x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x3, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0,
            576, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0, buf2


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        out = self.pad(x)
        out = self.conv(out)
        return out


class ConvBlockNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(ConvBlockNew, self).__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.conv.conv.weight
        primals_3 = self.conv.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ArminMasoumian/GCNDepth
ConvBlock
false
7,724
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
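ReflectionPad2d(1) followed by an unpadded 3x3 convolution is size-preserving, so the block above maps (N, C, H, W) to (N, C_out, H, W). A sketch, assuming ConvBlockNew is in scope and a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
print(ConvBlockNew(4, 4).cuda()(x).shape)  # torch.Size([4, 4, 4, 4])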
CrossEntropyLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def _expand_binary_labels(labels, label_weights, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def binary_cross_entropy(pred, label, weight=None, reduction='mean',
        avg_factor=None, class_weight=None):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(pred, label.float(),
        weight=class_weight, reduction='none')
    loss = weight_reduce_loss(loss, weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=
        None, class_weight=None):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
        None, class_weight=None):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            mask's corresponding object. This will be used to select the
            mask of the class which the object belongs to when the mask
            prediction is not class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target, weight=
        class_weight, reduction='mean')[None]


class CrossEntropyLoss(nn.Module):

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
                 class_weight=None, loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy
                loss. Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Defaults to 'mean'. Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss.
                Defaults to 1.0.
        """
        super(CrossEntropyLoss, self).__init__()
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, cls_score, label, weight=None, avg_factor=None,
                reduction_override=None, **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The method used to reduce the
                loss. Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else
            self.reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
            weight, class_weight=class_weight, reduction=reduction,
            avg_factor=avg_factor, **kwargs)
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tl_math.exp(tmp2)
    tmp4 = tmp1 + tmp3
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tmp4 + tmp6
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp7 + tmp9
    tmp11 = tl_math.log(tmp10)
    tmp12 = tmp0 - tmp11
    tmp14 = tmp12 * tmp13
    tmp15 = tmp2 - tmp11
    tmp17 = tmp15 * tmp16
    tmp18 = tmp14 + tmp17
    tmp19 = tmp5 - tmp11
    tmp21 = tmp19 * tmp20
    tmp22 = tmp18 + tmp21
    tmp23 = tmp8 - tmp11
    tmp25 = tmp23 * tmp24
    tmp26 = tmp22 + tmp25
    tmp27 = -tmp26
    tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
    tmp30 = tl.sum(tmp28, 1)[:, None]
    tmp31 = 64.0
    tmp32 = tmp30 / tmp31
    tmp33 = 1.0
    tmp34 = tmp32 * tmp33
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2,
            buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
    return buf2,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def _expand_binary_labels(labels, label_weights, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def binary_cross_entropy(pred, label, weight=None, reduction='mean',
        avg_factor=None, class_weight=None):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(pred, label.float(),
        weight=class_weight, reduction='none')
    loss = weight_reduce_loss(loss, weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=
        None, class_weight=None):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=
        None, class_weight=None):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            mask's corresponding object. This will be used to select the
            mask of the class which the object belongs to when the mask
            prediction is not class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target, weight=
        class_weight, reduction='mean')[None]


class CrossEntropyLossNew(nn.Module):

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
                 class_weight=None, loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy
                loss. Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Defaults to 'mean'. Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss.
                Defaults to 1.0.
        """
        super(CrossEntropyLossNew, self).__init__()
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Andrew-Zhu/DyFPN
CrossEntropyLoss
false
7,725
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
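With two dense 4x4x4x4 tensors, the fused path above evaluates soft-target cross entropy: the mean over all non-channel positions of -sum_c target_c * log_softmax(pred, dim=1)_c. A hedged check, assuming CrossEntropyLossNew is in scope and a CUDA device is available:

import torch

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
fused = CrossEntropyLossNew()(pred, target)
ref = -(target * torch.log_softmax(pred, dim=1)).sum(dim=1).mean()
print(torch.allclose(fused, ref, atol=1e-6))  # expected: True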
SSIM
import torch
import torch.nn as nn


class SSIM(nn.Module):

    def __init__(self):
        super(SSIM, self).__init__()
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
            self.C2)
        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * 
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tmp21 = tmp20 + tmp19 tmp23 = tmp22 + tmp21 tmp25 = tmp24 + tmp23 tmp27 = tmp26 + tmp25 tmp29 = tmp28 + tmp27 tmp31 = tmp30 + tmp29 tmp33 = tmp32 + tmp31 tmp35 = tmp34 + tmp33 tmp36 = tmp35 * tmp17 tmp37 = tmp19 * tmp19 tmp38 = tmp20 * tmp20 tmp39 = tmp38 + tmp37 tmp40 = tmp22 * tmp22 tmp41 = tmp40 + tmp39 tmp42 = tmp24 * tmp24 tmp43 = tmp42 + tmp41 tmp44 = tmp26 * tmp26 tmp45 = tmp44 + tmp43 tmp46 = tmp28 * tmp28 tmp47 = tmp46 + tmp45 tmp48 = tmp30 * tmp30 tmp49 = tmp48 + tmp47 tmp50 = tmp32 * tmp32 tmp51 = tmp50 + tmp49 tmp52 = tmp34 * tmp34 tmp53 = tmp52 + tmp51 tmp54 = tmp53 * tmp17 tmp57 = tmp56 + tmp55 tmp59 = tmp58 + tmp57 tmp61 = tmp60 + tmp59 tmp63 = tmp62 + tmp61 tmp65 = tmp64 + tmp63 tmp67 = tmp66 + tmp65 tmp69 = tmp68 + tmp67 tmp71 = tmp70 + tmp69 tmp72 = tmp71 * tmp17 tmp73 = tmp55 * tmp55 tmp74 = tmp56 * tmp56 tmp75 = tmp74 + tmp73 tmp76 = tmp58 * tmp58 tmp77 = tmp76 + tmp75 tmp78 = tmp60 * tmp60 tmp79 = tmp78 + tmp77 tmp80 = tmp62 * tmp62 tmp81 = tmp80 + tmp79 tmp82 = tmp64 * tmp64 tmp83 = tmp82 + tmp81 tmp84 = tmp66 * tmp66 tmp85 = tmp84 + tmp83 tmp86 = tmp68 * tmp68 tmp87 = tmp86 + tmp85 tmp88 = tmp70 * tmp70 tmp89 = tmp88 + tmp87 tmp90 = tmp89 * tmp17 tmp91 = 2.0 tmp92 = tmp36 * tmp91 tmp93 = tmp92 * tmp72 tmp94 = 0.0001 tmp95 = tmp93 + tmp94 tmp96 = tmp36 * tmp72 tmp97 = tmp18 - tmp96 tmp98 = tmp97 * tmp91 tmp99 = 0.0009 tmp100 = tmp98 + tmp99 tmp101 = tmp95 * tmp100 tmp102 = tmp36 * tmp36 tmp103 = tmp72 * tmp72 tmp104 = tmp102 + tmp103 tmp105 = tmp104 + tmp94 tmp106 = tmp54 - tmp102 tmp107 = tmp90 - tmp103 tmp108 = tmp106 + tmp107 tmp109 = tmp108 + tmp99 tmp110 = tmp105 * tmp109 tmp111 = tmp101 / tmp110 tmp112 = 1.0 tmp113 = tmp112 - tmp111 tmp114 = 0.5 tmp115 = tmp113 * tmp114 tmp116 = 0.0 tmp117 = triton_helpers.maximum(tmp115, tmp116) tmp118 = triton_helpers.minimum(tmp117, tmp112) tl.store(in_out_ptr0 + x3, tmp118, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf0 del buf0 buf7 = buf6 del buf6 triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[ grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf2 return buf7, class SSIMNew(nn.Module): def __init__(self): super(SSIMNew, self).__init__() self.mu_x_pool = nn.AvgPool2d(3, 1) self.mu_y_pool = nn.AvgPool2d(3, 1) self.sig_x_pool = nn.AvgPool2d(3, 1) self.sig_y_pool = nn.AvgPool2d(3, 1) self.sig_xy_pool = nn.AvgPool2d(3, 1) self.refl = nn.ReflectionPad2d(1) self.C1 = 0.01 ** 2 self.C2 = 0.03 ** 2 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ArminMasoumian/GCNDepth
SSIM
false
7,726
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
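A minimal usage sketch, assuming the SSIM and SSIMNew classes above are in scope and a CUDA device is available; both modules are parameter-free, so the fused Triton kernel can be checked directly against the eager reference:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
ref = SSIM()(x, y)        # eager PyTorch path
fused = SSIMNew()(x, y)   # Triton path; call() asserts the (4, 4, 4, 4) shape
torch.testing.assert_close(ref, fused, rtol=1e-4, atol=1e-5)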
Hardswish
import torch import torch.nn as nn import torch.nn.functional as F class Hardswish(nn.Module): @staticmethod def forward(x): return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = 0.16666666666666666 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class HardswishNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AsakusaRinne/tensorrt_yolov5_tracker
Hardswish
false
7,727
[ "MIT" ]
22
b9a3a6fc94710e8291d6a614ed2b04cbc4c56599
https://github.com/AsakusaRinne/tensorrt_yolov5_tracker/tree/b9a3a6fc94710e8291d6a614ed2b04cbc4c56599
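A quick sanity check, assuming the classes above are in scope: x * hardtanh(x + 3, 0, 6) / 6 is the standard hard-swish, so the record's module, its Triton variant, and nn.Hardswish should all agree on CUDA inputs:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = Hardswish()(x)                                # static forward, no state
torch.testing.assert_close(ref, HardswishNew()(x))  # fused Triton kernel
torch.testing.assert_close(ref, nn.Hardswish()(x))  # built-in equivalent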
FlopsCrossEntropyLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def flops_cross_entropy(pred, label, weight=None, reduction='mean',
        avg_factor=None, class_weight=None):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.

    Returns:
        torch.Tensor: The calculated loss
    """
    loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


class FlopsCrossEntropyLoss(nn.Module):

    def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean',
            class_weight=None, loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy
                loss. Defaults to False.
            reduction (str, optional): Defaults to 'mean'.
                Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss.
                Defaults to 1.0.
        """
        super(FlopsCrossEntropyLoss, self).__init__()
        assert use_sigmoid is False or use_mask is False
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.cls_criterion = flops_cross_entropy

    def forward(self, loss_flops, cls_score, label, weight=None,
            avg_factor=None, reduction_override=None, **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(cls_score, label,
            weight, class_weight=class_weight, reduction=reduction,
            avg_factor=avg_factor, **kwargs)
        loss_cls = loss_cls + loss_flops
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) @triton.jit def triton_poi_fused__log_softmax_add_mean_mul_neg_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp2 = 64.0 tmp3 = tmp1 / tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf0, arg1_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 buf2 = buf0 del buf0 triton_poi_fused__log_softmax_add_mean_mul_neg_sum_2[grid(256)](buf1, arg2_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 del buf1 return buf2, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def flops_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None): """Calculate the CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. Returns: torch.Tensor: The calculated loss """ loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class FlopsCrossEntropyLossNew(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', class_weight=None, loss_weight=1.0): """CrossEntropyLoss. Args: use_sigmoid (bool, optional): Whether the prediction uses sigmoid of softmax. Defaults to False. use_mask (bool, optional): Whether to use mask cross entropy loss. Defaults to False. reduction (str, optional): . Defaults to 'mean'. Options are "none", "mean" and "sum". class_weight (list[float], optional): Weight of each class. Defaults to None. loss_weight (float, optional): Weight of the loss. Defaults to 1.0. """ super(FlopsCrossEntropyLossNew, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.reduction = reduction self.loss_weight = loss_weight self.class_weight = class_weight self.cls_criterion = flops_cross_entropy def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Andrew-Zhu/DyFPN
FlopsCrossEntropyLoss
false
7,728
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
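An illustrative call, assuming the class above is in scope and a PyTorch version whose F.cross_entropy accepts probability targets: the forward takes the FLOPs penalty first, and because loss_flops is a full tensor, the scalar mean cross-entropy broadcasts against it (the addition the last Triton kernel fuses):

import torch

flops = torch.rand(4, 4, 4, 4, device='cuda')    # per-location FLOPs penalty
logits = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')   # soft class weights over dim 1
crit = FlopsCrossEntropyLoss()
loss = crit(flops, logits, target)               # (4, 4, 4, 4): mean CE + flops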
GaussianFocalLoss
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None,
    reduction='mean', avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
            None, **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
            loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
            reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction in
                gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_reg = self.loss_weight * gaussian_focal_loss(pred, target,
            weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction,
            avg_factor=avg_factor)
        return loss_reg


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = -tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp0 tmp7 = tmp6 * tmp6 tmp8 = tmp4 * tmp7 tmp10 = tmp9 == tmp5 tmp11 = tmp10.to(tl.float32) tmp12 = tmp8 * tmp11 tmp13 = tmp6 + tmp1 tmp14 = tl_math.log(tmp13) tmp15 = -tmp14 tmp16 = tmp0 * tmp0 tmp17 = tmp15 * tmp16 tmp18 = tmp5 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp17 * tmp20 tmp22 = tmp12 + tmp21 tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = 256.0 tmp27 = tmp25 / tmp26 tmp28 = tmp27 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. 
:Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian distribution. Args: pred (torch.Tensor): The prediction. gaussian_target (torch.Tensor): The learning target of the prediction in gaussian distribution. alpha (float, optional): A balanced form for Focal Loss. Defaults to 2.0. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 4.0. """ eps = 1e-12 pos_weights = gaussian_target.eq(1) neg_weights = (1 - gaussian_target).pow(gamma) pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights return pos_loss + neg_loss class GaussianFocalLossNew(nn.Module): """GaussianFocalLoss is a variant of focal loss. More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501 Please notice that the target in GaussianFocalLoss is a gaussian heatmap, not 0/1 binary target. Args: alpha (float): Power of prediction. gamma (float): Power of target for negtive samples. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. """ def __init__(self, alpha=2.0, gamma=4.0, reduction='mean', loss_weight=1.0 ): super(GaussianFocalLossNew, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Andrew-Zhu/DyFPN
GaussianFocalLoss
false
7,729
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
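A usage sketch, assuming the class above is in scope: pred must already be a probability in (0, 1), since the loss takes log(pred) and log(1 - pred) directly, with eps = 1e-12 only guarding the boundaries:

import torch

pred = torch.rand(4, 4, 4, 4, device='cuda')     # probabilities, e.g. sigmoid output
heatmap = torch.rand(4, 4, 4, 4, device='cuda')  # gaussian target; exact 1s count as positives
crit = GaussianFocalLoss(alpha=2.0, gamma=4.0)
loss = crit(pred, heatmap)                       # scalar under reduction='mean'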
Conv3x3
import torch import torch.nn as nn class Conv3x3(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv3x3New(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3New, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ArminMasoumian/GCNDepth
Conv3x3
false
7,730
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
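An equivalence sketch, assuming both classes above are in scope: Conv3x3New draws fresh random conv weights, so a fair comparison copies the state_dict, and the generated call() is specialized to the (4, 4, 4, 4) inputs from get_inputs():

import torch

m = Conv3x3(4, 4).cuda()
m_fused = Conv3x3New(4, 4).cuda()
m_fused.load_state_dict(m.state_dict())      # share conv weight and bias
x = torch.rand(4, 4, 4, 4, device='cuda')    # call() asserts exactly this shape
assert m(x).shape == (4, 4, 4, 4)            # pad=1 with a 3x3 kernel keeps H, W
torch.testing.assert_close(m(x), m_fused(x))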
Project
import torch import torch.nn as nn class Project(nn.Module): def __init__(self, batch_size, height, width, eps=1e-07): super(Project, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def forward(self, points, K, T): P = torch.matmul(K, T)[:, :3, :] cam_points = torch.matmul(P, points) pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze( 1) + self.eps) pix_coords = pix_coords.view(self.batch_size, 2, self.height, self. width) pix_coords = pix_coords.permute(0, 2, 3, 1) pix_coords[..., 0] /= self.width - 1 pix_coords[..., 1] /= self.height - 1 pix_coords = (pix_coords - 0.5) * 2 return pix_coords def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 48 x1 = xindex // 48 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex % 32 x4 = xindex tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp4 = tl.full([1], 0, tl.int32) tmp5 = tmp1 == tmp4 tmp6 = tmp4 == tmp4 tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tmp7 / tmp10 tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = tl.where(tmp6, tmp13, tmp11) tmp16 = tmp15 / tmp10 tmp17 = tl.where(tmp5, tmp13, tmp16) tmp18 = tl.where(tmp5, tmp14, tmp17) tmp19 = tmp18 * tmp12 tmp20 = tl.where(tmp3, tmp19, tmp18) tmp21 = tmp0 == tmp4 tmp23 = tmp22 / tmp10 tmp24 = tl.where(tmp21, tmp13, tmp23) tmp25 = tl.where(tmp21, tmp14, tmp24) tmp26 = tl.where(tmp2, tmp19, tmp25) tmp27 = tl.where(tmp2, tmp20, tmp26) tmp28 = 0.5 tmp29 = tmp27 - tmp28 tmp30 = 2.0 tmp31 = tmp29 * tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2 ) del arg2_1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, class ProjectNew(nn.Module): def __init__(self, batch_size, height, width, eps=1e-07): super(ProjectNew, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def forward(self, input_0, input_1, input_2): arg2_1 = input_0 arg0_1 = input_1 arg1_1 = 
input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ArminMasoumian/GCNDepth
Project
false
7,731
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
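A shape sketch, assuming the class above is in scope; with the toy shapes from get_inputs() every matmul batches over the leading dims, while in its original monodepth-style use points/K/T would be homogeneous camera points, intrinsics, and a pose, with the output feeding F.grid_sample:

import torch

points = torch.rand(4, 3, 4, 4, device='cuda')
K = torch.rand(4, 4, 4, 4, device='cuda')
T = torch.rand(4, 4, 4, 4, device='cuda')
proj = Project(batch_size=4, height=4, width=4)
pix = proj(points, K, T)
assert pix.shape == (4, 4, 4, 2)   # (B, H, W, 2), rescaled from [0, 1] to [-1, 1]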
MakeFeatures
import torch import torch.nn as nn class MakeFeatures(nn.Module): """ Returns features to be used by PairDrift. """ def __init__(self, in_dim, out_dim): super(MakeFeatures, self).__init__() self.single = nn.Linear(in_dim, out_dim) self.pair = nn.Linear(in_dim, out_dim) def forward(self, x): pairs = x[..., None, :, :] - x[..., :, None, :] return self.single(x), self.pair(pairs), pairs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x0 = xindex % 4 x5 = xindex // 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (x0 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x6, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(1024)](primals_1, buf0, 1024, XBLOCK= 128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), buf0, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (256, 4), (4, 1), 0) class MakeFeaturesNew(nn.Module): """ Returns features to be used by PairDrift. """ def __init__(self, in_dim, out_dim): super(MakeFeaturesNew, self).__init__() self.single = nn.Linear(in_dim, out_dim) self.pair = nn.Linear(in_dim, out_dim) def forward(self, input_0): primals_2 = self.single.weight primals_3 = self.single.bias primals_4 = self.pair.weight primals_5 = self.pair.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1], output[2]
AustenLamacraft/QuaRL
MakeFeatures
false
7,732
[ "MIT" ]
13
1764f0ccd0ba90d44e799b6ac908df76be14a52e
https://github.com/AustenLamacraft/QuaRL/tree/1764f0ccd0ba90d44e799b6ac908df76be14a52e
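A behaviour sketch, assuming the class above is in scope: the broadcast builds all pairwise differences, so pairs[..., a, b, :] equals x[..., b, :] - x[..., a, :]:

import torch

mf = MakeFeatures(4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
single, pair, pairs = mf(x)
assert single.shape == (4, 4, 4, 4)
assert pair.shape == pairs.shape == (4, 4, 4, 4, 4)
torch.testing.assert_close(pairs[0, 0, 1, 2], x[0, 0, 2] - x[0, 0, 1])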
Conv5x5
import torch import torch.nn as nn class Conv5x5(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv5x5, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(2) else: self.pad = nn.ZeroPad2d(2) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5) def forward(self, x): out = self.pad(x) out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 8 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv5x5New(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv5x5New, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(2) else: self.pad = nn.ZeroPad2d(2) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ArminMasoumian/GCNDepth
Conv5x5
false
7,733
[ "MIT" ]
32
9fa77812fa944c2701a45f09acf988815ca50aee
https://github.com/ArminMasoumian/GCNDepth/tree/9fa77812fa944c2701a45f09acf988815ca50aee
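The same pattern as the Conv3x3 record, assuming the class above is in scope: reflection padding of 2 with a 5x5 kernel preserves spatial size, and use_refl=False swaps in zero padding:

import torch

m = Conv5x5(4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert m(x).shape == (4, 4, 4, 4)              # pad=2 with a 5x5 kernel keeps H, W
m_zero = Conv5x5(4, 4, use_refl=False).cuda()  # ZeroPad2d variant, same shape
assert m_zero(x).shape == (4, 4, 4, 4)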
BCEBlurWithLogitsLoss
import torch import torch.nn as nn class BCEBlurWithLogitsLoss(nn.Module): def __init__(self, alpha=0.05): super(BCEBlurWithLogitsLoss, self).__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') self.alpha = alpha def forward(self, pred, true): loss = self.loss_fcn(pred, true) pred = torch.sigmoid(pred) dx = pred - true alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 0.0001)) loss *= alpha_factor return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_div_exp_mean_mul_rsub_sigmoid_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.sigmoid(tmp3) tmp14 = tmp13 - tmp0 tmp15 = tmp14 - tmp1 tmp16 = 19.96007984031936 tmp17 = tmp15 * tmp16 tmp18 = tl_math.exp(tmp17) tmp19 = tmp1 - tmp18 tmp20 = tmp12 * tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp23 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_div_exp_mean_mul_rsub_sigmoid_sub_0[ grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCEBlurWithLogitsLossNew(nn.Module): def __init__(self, alpha=0.05): super(BCEBlurWithLogitsLossNew, self).__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') self.alpha = alpha def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AsakusaRinne/tensorrt_yolov5_tracker
BCEBlurWithLogitsLoss
false
7,734
[ "MIT" ]
22
b9a3a6fc94710e8291d6a614ed2b04cbc4c56599
https://github.com/AsakusaRinne/tensorrt_yolov5_tracker/tree/b9a3a6fc94710e8291d6a614ed2b04cbc4c56599
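An intuition sketch, assuming the class above is in scope: dx = sigmoid(pred) - true approaches 1 on a confident prediction against a zero label (a likely missing annotation), where alpha_factor = 1 - exp((dx - 1) / (alpha + 1e-4)) approaches 0, down-weighting that element:

import torch

crit = BCEBlurWithLogitsLoss(alpha=0.05)
pred = torch.randn(4, 4, 4, 4, device='cuda')
true = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
loss = crit(pred, true)   # scalar mean
# A confident false positive is strongly suppressed relative to plain BCE:
suppressed = crit(torch.full((1, 1, 1, 1), 8.0, device='cuda'),
                  torch.zeros(1, 1, 1, 1, device='cuda'))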
GCNSynthetic
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.parameter import Parameter


class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCNSynthetic(nn.Module):
    """
    3-layer GCN used in the GNN Explainer synthetic tasks.
    """

    def __init__(self, nfeat, nhid, nout, nclass, dropout):
        super(GCNSynthetic, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.gc3 = GraphConvolution(nhid, nout)
        self.lin = nn.Linear(nhid + nhid + nout, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        x1 = F.relu(self.gc1(x, adj))
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.relu(self.gc2(x1, adj))
        x2 = F.dropout(x2, self.dropout, training=self.training)
        x3 = self.gc3(x2, adj)
        x = self.lin(torch.cat((x1, x2, x3), dim=1))
        return F.log_softmax(x, dim=1)

    def loss(self, pred, label):
        return F.nll_loss(pred, label)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'nfeat': 4, 'nhid': 4, 'nout': 4, 'nclass': 4,
        'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn.functional as F import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_cat_relu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + (x0 + 12 * x1), tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_relu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + (x0 + 12 * x1), tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 12), (12, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 buf10 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf8 = reinterpret_tensor(buf10, (4, 4), (12, 1), 0) get_raw_stream(0) triton_poi_fused_add_cat_relu_0[grid(16)](buf2, primals_4, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf4 del buf4 buf9 = reinterpret_tensor(buf10, (4, 4), (12, 1), 4) triton_poi_fused_add_cat_relu_1[grid(16)](buf5, primals_6, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = buf3 del buf3 extern_kernels.mm(buf5, primals_7, out=buf6) buf7 = reinterpret_tensor(buf10, (4, 4), (12, 1), 8) extern_kernels.addmm(primals_8, primals_3, buf6, alpha=1, beta=1, out=buf7) del primals_8 buf11 = buf6 del buf6 extern_kernels.addmm(primals_10, buf10, reinterpret_tensor( primals_9, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf11) del primals_10 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(16)](buf11, buf12, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__log_softmax_3[grid(16)](buf12, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf12 return buf13, buf2, buf5, buf10, buf13, primals_9, reinterpret_tensor( primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = 
Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features
            ) + ' -> ' + str(self.out_features) + ')'


class GCNSyntheticNew(nn.Module):
    """
    3-layer GCN used in the GNN Explainer synthetic tasks.
    """

    def __init__(self, nfeat, nhid, nout, nclass, dropout):
        super(GCNSyntheticNew, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.gc3 = GraphConvolution(nhid, nout)
        self.lin = nn.Linear(nhid + nhid + nout, nclass)
        self.dropout = dropout

    def loss(self, pred, label):
        return F.nll_loss(pred, label)

    def forward(self, input_0, input_1):
        primals_1 = self.gc1.weight
        primals_4 = self.gc1.bias
        primals_2 = self.gc2.weight
        primals_6 = self.gc2.bias
        primals_3 = self.gc3.weight
        primals_8 = self.gc3.bias
        primals_9 = self.lin.weight
        primals_10 = self.lin.bias
        primals_5 = input_0
        primals_7 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
Armagaan/cf-gnnexplainer
GCNSynthetic
false
7,735
[ "MIT" ]
15
22b415e114c52d8d60ca45a40c3cb33c1947400c
https://github.com/Armagaan/cf-gnnexplainer/tree/22b415e114c52d8d60ca45a40c3cb33c1947400c
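A usage sketch, assuming the classes above are in scope: adj stands in for a normalized adjacency matrix, and eval() disables the two dropout layers so the eager output is deterministic:

import torch

model = GCNSynthetic(nfeat=4, nhid=4, nout=4, nclass=4, dropout=0.5).cuda()
model.eval()
x = torch.rand(4, 4, device='cuda')      # node features
adj = torch.rand(4, 4, device='cuda')    # (normalized) adjacency
log_probs = model(x, adj)                # (4, 4) log-softmax over classes
labels = torch.randint(0, 4, (4,), device='cuda')
nll = model.loss(log_probs, labels)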
BertLayerNorm
import torch from torch import nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_2, buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class BertLayerNormNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNormNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ArrowLuo/GRACE
BertLayerNorm
false
7,736
[ "Apache-2.0" ]
17
f27b500ba905685c03eee6d91d87adc9ef78b4d1
https://github.com/ArrowLuo/GRACE/tree/f27b500ba905685c03eee6d91d87adc9ef78b4d1
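A minimal parity sketch for this record, assuming both code fields above have been run in one Python session and a CUDA device is present (the generated kernels are CUDA-only); the constructor arguments and tensor shapes come from the record's own get_inputs/get_init_inputs helpers, and the same pattern applies to every record below.

import torch

torch.manual_seed(0)
init_args, init_kwargs = get_init_inputs()            # ([], {'hidden_size': 4})
ref = BertLayerNorm(*init_args, **init_kwargs).cuda()
opt = BertLayerNormNew(*init_args, **init_kwargs).cuda()
opt.load_state_dict(ref.state_dict())                 # share the same weight/bias values
x = get_inputs()[0].cuda()
print(torch.allclose(ref(x), opt(x), atol=1e-5))      # compiled kernels should match eager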
Net
import torch class Net(torch.nn.Module): def __init__(self, n_input, n_hidden, n_output): super(Net, self).__init__() self.hidden1 = torch.nn.Linear(n_input, n_hidden) self.hidden2 = torch.nn.Linear(n_hidden, n_hidden) self.hidden3 = torch.nn.Linear(n_hidden, n_hidden) self.hidden4 = torch.nn.Linear(n_hidden, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, x): x = torch.relu(self.hidden1(x)) x = torch.relu(self.hidden2(x)) x = torch.relu(self.hidden3(x)) x = torch.relu(self.hidden4(x)) x = self.predict(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_input': 4, 'n_hidden': 4, 'n_output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf12, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7, primals_9, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_11 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf7, (64, 4), (4, 1), 0 ), primals_10, buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12 class NetNew(torch.nn.Module): def __init__(self, n_input, n_hidden, n_output): super(NetNew, self).__init__() self.hidden1 = torch.nn.Linear(n_input, n_hidden) self.hidden2 = torch.nn.Linear(n_hidden, n_hidden) self.hidden3 = torch.nn.Linear(n_hidden, n_hidden) self.hidden4 = torch.nn.Linear(n_hidden, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.hidden3.weight primals_7 = self.hidden3.bias primals_8 = self.hidden4.weight primals_9 = self.hidden4.bias primals_10 = self.predict.weight primals_11 = self.predict.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
AstroHiro/NCM
Net
false
7,737
[ "MIT" ]
23
720db63ec018a1986ac9e370613f8209328b89e1
https://github.com/AstroHiro/NCM/tree/720db63ec018a1986ac9e370613f8209328b89e1
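Every layer in this MLP acts on the last dimension only, so any leading batch shape works; a quick eager sketch (the batch shapes here are illustrative, not from the record):

import torch

net = Net(n_input=4, n_hidden=4, n_output=4)
print(net(torch.rand(7, 4)).shape)     # torch.Size([7, 4])
print(net(torch.rand(2, 3, 4)).shape)  # torch.Size([2, 3, 4]); nn.Linear broadcasts over leading dims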
CrossEntropyLossOneHot
import torch import torch.nn as nn class CrossEntropyLossOneHot(nn.Module): def __init__(self): super(CrossEntropyLossOneHot, self).__init__() self.log_softmax = nn.LogSoftmax(dim=-1) def forward(self, preds, labels): return torch.mean(torch.sum(-labels * self.log_softmax(preds), -1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = -tmp0 tmp3 = tl_math.exp(tmp2) tmp5 = tl_math.exp(tmp4) tmp6 = tmp3 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tl_math.log(tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp1 * tmp14 tmp17 = -tmp16 tmp18 = tmp4 - tmp13 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp22 = -tmp21 tmp23 = tmp7 - tmp13 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp27 = -tmp26 tmp28 = tmp10 - tmp13 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, arg0_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf3, class 
CrossEntropyLossOneHotNew(nn.Module): def __init__(self): super(CrossEntropyLossOneHotNew, self).__init__() self.log_softmax = nn.LogSoftmax(dim=-1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
B0Qi/hualubei2020-callingsmoking
CrossEntropyLossOneHot
false
7,738
[ "MIT" ]
27
73d1049d95554b5d669afa93132a0fce37461ff4
https://github.com/B0Qi/hualubei2020-callingsmoking/tree/73d1049d95554b5d669afa93132a0fce37461ff4
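When `labels` is a true one-hot encoding of 2-D logits, this loss coincides with the standard cross entropy; a quick eager check (the 8x5 shapes are illustrative, not from the record):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(8, 5)
target = torch.randint(0, 5, (8,))
one_hot = F.one_hot(target, num_classes=5).float()
crit = CrossEntropyLossOneHot()
print(torch.allclose(crit(logits, one_hot), F.cross_entropy(logits, target), atol=1e-6))  # True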
Attn
import torch from torch import nn import torch.nn.functional as F class Attn(nn.Module): def __init__(self, hidden_size): super(Attn, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Linear(self.hidden_size, 1) def forward(self, hidden, encoder_outputs, normalize=True): encoder_outputs = encoder_outputs.transpose(0, 1) attn_energies = self.score(hidden, encoder_outputs) normalized_energy = F.softmax(attn_energies, dim=2) context = torch.bmm(normalized_energy, encoder_outputs) return context.transpose(0, 1) def score(self, hidden, encoder_outputs): max_len = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) energy = self.attn(torch.cat([H, encoder_outputs], 2)) energy = self.v(F.tanh(energy)).transpose(1, 2) return energy def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) 
assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 buf5 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0) del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), ( 4, 16, 1), 0), out=buf7) return reinterpret_tensor(buf7, (1, 4, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0 ), primals_5 class AttnNew(nn.Module): def __init__(self, hidden_size): super(AttnNew, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Linear(self.hidden_size, 1) def score(self, hidden, encoder_outputs): max_len = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) energy = self.attn(torch.cat([H, encoder_outputs], 2)) energy = self.v(F.tanh(energy)).transpose(1, 2) return energy def forward(self, input_0, input_1): primals_3 = self.attn.weight primals_4 = self.attn.bias primals_5 = self.v.weight primals_6 = self.v.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
AuCson/SEDST
Attn
false
7,739
[ "MIT" ]
23
1c1691e2abc50eb2120ed49c874090f6c4f741d3
https://github.com/AuCson/SEDST/tree/1c1691e2abc50eb2120ed49c874090f6c4f741d3
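Shape conventions for this attention module, sketched in eager mode: `hidden` is (batch, hidden) and `encoder_outputs` is (seq_len, batch, hidden); the context comes back as (1, batch, hidden), with weights softmax-normalized over seq_len (the seq_len of 6 below is illustrative):

import torch

attn = Attn(hidden_size=4)
hidden = torch.rand(4, 4)       # (batch, hidden)
enc = torch.rand(6, 4, 4)       # (seq_len, batch, hidden)
print(attn(hidden, enc).shape)  # torch.Size([1, 4, 4])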
PermEqMean
import torch import torch.nn as nn class PermEqMean(nn.Module): """ Returns equivariant layer used by EquivarDrift. """ def __init__(self, in_dim, out_dim): super(PermEqMean, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, x): x_mean = x.mean(-2, keepdim=True) return self.Gamma(x) + self.Lambda(x_mean) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_1[grid(256)](buf3, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del primals_3 return buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (16, 4), (4, 1), 0) class PermEqMeanNew(nn.Module): """ Returns equivariant layer used by EquivarDrift. """ def __init__(self, in_dim, out_dim): super(PermEqMeanNew, self).__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) def forward(self, input_0): primals_2 = self.Gamma.weight primals_3 = self.Gamma.bias primals_4 = self.Lambda.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AustenLamacraft/QuaRL
PermEqMean
false
7,740
[ "MIT" ]
13
1764f0ccd0ba90d44e799b6ac908df76be14a52e
https://github.com/AustenLamacraft/QuaRL/tree/1764f0ccd0ba90d44e799b6ac908df76be14a52e
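A quick eager check of the property the docstring names: because the Lambda branch only sees the mean over the set dimension, permuting the set elements permutes the output rows identically (the set size of 5 is illustrative):

import torch

torch.manual_seed(0)
layer = PermEqMean(in_dim=4, out_dim=4)
x = torch.rand(5, 4)                  # 5 set elements, 4 features each
perm = torch.randperm(5)
print(torch.allclose(layer(x)[perm], layer(x[perm]), atol=1e-6))  # True: permutation equivariant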
CombineFeatures
import torch import torch.nn as nn class CombineFeatures(nn.Module): """ Returns layer to be used by PairDrift. """ def __init__(self, in_dim, out_dim, zero_init=False): super(CombineFeatures, self).__init__() self.single = nn.Linear(in_dim, out_dim) self.pair = nn.Linear(in_dim, out_dim) if zero_init: self.single.weight.data = torch.zeros(out_dim, in_dim) self.single.bias.data = torch.zeros(out_dim) self.pair.weight.data = torch.zeros(out_dim, in_dim) self.pair.bias.data = torch.zeros(out_dim) def forward(self, s, p): return self.single(s) + self.pair(p).sum(dim=-3) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 x5 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x5 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x5 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + (32 + x5 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (48 + x5 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp2 + tmp9 tl.store(in_out_ptr0 + x4, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_sum_0[grid(256)](buf2, primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0) class CombineFeaturesNew(nn.Module): """ Returns layer to be used by PairDrift. """ def __init__(self, in_dim, out_dim, zero_init=False): super(CombineFeaturesNew, self).__init__() self.single = nn.Linear(in_dim, out_dim) self.pair = nn.Linear(in_dim, out_dim) if zero_init: self.single.weight.data = torch.zeros(out_dim, in_dim) self.single.bias.data = torch.zeros(out_dim) self.pair.weight.data = torch.zeros(out_dim, in_dim) self.pair.bias.data = torch.zeros(out_dim) def forward(self, input_0, input_1): primals_1 = self.single.weight primals_2 = self.single.bias primals_4 = self.pair.weight primals_5 = self.pair.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
AustenLamacraft/QuaRL
CombineFeatures
false
7,741
[ "MIT" ]
13
1764f0ccd0ba90d44e799b6ac908df76be14a52e
https://github.com/AustenLamacraft/QuaRL/tree/1764f0ccd0ba90d44e799b6ac908df76be14a52e
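Here `s` holds per-element features and `p` pairwise features; summing `p` over dim -3 collapses the pair axis so the two branches add elementwise, and `zero_init=True` makes the layer start as the zero map. A small eager sketch (batch and set sizes are illustrative):

import torch

layer = CombineFeatures(in_dim=4, out_dim=4, zero_init=True)
s = torch.rand(2, 5, 4)     # (batch, N, d) per-element features
p = torch.rand(2, 5, 5, 4)  # (batch, N, N, d) pair features
out = layer(s, p)
print(out.shape)            # torch.Size([2, 5, 4])
print(out.abs().max())      # tensor(0.) because of zero_init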
disparityentropy
import torch from torch import nn import torch.utils.data import torch.nn.parallel class disparityentropy(nn.Module): def __init__(self, maxdisp): super(disparityentropy, self).__init__() def forward(self, x): out = torch.sum(-x * torch.log(x), 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'maxdisp': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_mul_neg_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp1 = -tmp0 tmp2 = tl_math.log(tmp0) tmp3 = tmp1 * tmp2 tmp5 = -tmp4 tmp6 = tl_math.log(tmp4) tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tmp10 = -tmp9 tmp11 = tl_math.log(tmp9) tmp12 = tmp10 * tmp11 tmp13 = tmp8 + tmp12 tmp15 = -tmp14 tmp16 = tl_math.log(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_mul_neg_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class disparityentropyNew(nn.Module): def __init__(self, maxdisp): super(disparityentropyNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AvrilCheng/LidarStereoNet
disparityentropy
false
7,742
[ "MIT" ]
27
96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
https://github.com/AvrilCheng/LidarStereoNet/tree/96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
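This computes the per-pixel entropy -sum_d p_d * log(p_d) over the disparity dimension (dim 1); the input must already be a probability volume (e.g. a softmax over disparities), otherwise the log of non-positive values produces NaNs, and note that `maxdisp` is accepted but unused. An eager sketch with illustrative shapes:

import torch

ent = disparityentropy(maxdisp=4)
cost = torch.rand(2, 4, 8, 8)      # (batch, disparities, H, W)
prob = torch.softmax(cost, dim=1)  # normalize over the disparity axis first
h = ent(prob)
print(h.shape)                     # torch.Size([2, 8, 8])
print(bool((h >= 0).all()))        # True: entropy of a distribution is non-negative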
ConvReLUNorm
import torch import torch.utils.data import torch.nn.functional as F class ConvReLUNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNorm, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout_val = dropout self.dropout = torch.nn.Dropout(dropout) def forward(self, signal): out = F.relu(self.conv(signal)) out = self.norm(out.transpose(1, 2)).transpose(1, 2) if self.dropout_val > 0.0: out = self.dropout(out) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf2 del buf3 del primals_5 return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0 ), primals_1, primals_3, primals_4, buf1 class ConvReLUNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNormNew, self).__init__() self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = torch.nn.LayerNorm(out_channels) self.dropout_val = dropout self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AstraliteHeart/cookietts
ConvReLUNorm
false
7,743
[ "BSD-3-Clause" ]
25
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
https://github.com/AstraliteHeart/cookietts/tree/c871f5f7b5790656d5b57bcd9e63946a2da52f0f
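The transpose pair means the LayerNorm runs across channels at every time step; with the default affine init (weight=1, bias=0) each position's channel mean is driven to zero, which a short eager sketch can confirm (the shapes are illustrative):

import torch

m = ConvReLUNorm(in_channels=4, out_channels=8).eval()
y = m(torch.rand(2, 4, 16))
print(y.shape)                                 # torch.Size([2, 8, 16])
print(bool(y.mean(dim=1).abs().max() < 1e-4))  # True: channel stats are normalized per position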
ScaledDotProductAttention
import torch import numpy as np from torch import nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v, mask=None): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature if mask is not None: attn = attn.masked_fill(mask, -np.inf) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
ArrowLuo/GRACE
ScaledDotProductAttention
false
7,744
[ "Apache-2.0" ]
17
f27b500ba905685c03eee6d91d87adc9ef78b4d1
https://github.com/ArrowLuo/GRACE/tree/f27b500ba905685c03eee6d91d87adc9ef78b4d1
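With the usual temperature of sqrt(d_k) and dropout disabled, this module agrees with PyTorch's built-in kernel; a parity sketch assuming PyTorch >= 2.0 for F.scaled_dot_product_attention (shapes are illustrative):

import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
d_k = 4
attn = ScaledDotProductAttention(temperature=math.sqrt(d_k)).eval()  # eval() turns dropout off
q, k, v = (torch.rand(2, 3, d_k) for _ in range(3))
out, weights = attn(q, k, v)
ref = F.scaled_dot_product_attention(q, k, v)  # scales by 1/sqrt(d_k) internally
print(torch.allclose(out, ref, atol=1e-6))     # True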
ConvNorm
import torch import torch.utils.data import torch.nn.functional as F class ConvNorm(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear', dropout=0.0 ): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.dropout = dropout self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, signal): conv_signal = self.conv(signal) if self.training and self.dropout > 0.0: conv_signal = F.dropout(conv_signal, p=self.dropout) return conv_signal def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class ConvNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear', dropout=0.0 ): super(ConvNormNew, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.dropout = dropout self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AstraliteHeart/cookietts
ConvNorm
false
7,745
[ "BSD-3-Clause" ]
25
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
https://github.com/AstraliteHeart/cookietts/tree/c871f5f7b5790656d5b57bcd9e63946a2da52f0f
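With padding=None the constructor asserts an odd kernel and derives 'same' padding as dilation * (kernel_size - 1) / 2, so the output keeps the input length; a quick eager sketch (the dilation of 2 is illustrative):

import torch

conv = ConvNorm(4, 4, kernel_size=5, dilation=2)  # padding = 2 * (5 - 1) / 2 = 4
print(conv(torch.rand(2, 4, 16)).shape)           # torch.Size([2, 4, 16])
# ConvNorm(4, 4, kernel_size=4) would trip the odd-kernel assertion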
Conv1d
import torch import torch.utils.data from torch import nn from torch.nn import Conv1d class Conv1d(nn.Conv1d): """ :param in_channels: Scalar :param out_channels: Scalar :param kernel_size: Scalar :param activation_fn: activation function :param drop_rate: Scalar. dropout rate :param stride: Scalar :param padding: padding type :param dilation: Scalar :param groups: Scalar :param bias: Boolean. :param bn: Boolean. whether it uses batch normalization """ def __init__(self, in_channels, out_channels, kernel_size, activation_fn=None, drop_rate=0.0, stride=1, padding='same', dilation=1, groups=1, bias=True, bn=False): self.activation_fn = activation_fn self.drop_rate = drop_rate if padding == 'same': padding = kernel_size // 2 * dilation self.even_kernel = not bool(kernel_size % 2) super(Conv1d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None self.batch_norm = nn.BatchNorm1d(out_channels, eps=0.001, momentum= 0.001) if bn else None def forward(self, x): """ :param x: (N, C_in, T) Tensor. Returns: y: (N, C_out, T) Tensor. """ y = super(Conv1d, self).forward(x) y = self.batch_norm(y) if self.batch_norm is not None else y y = self.activation_fn(y) if self.activation_fn is not None else y y = self.drop_out(y) if self.drop_out is not None else y y = y[:, :, :-1] if self.even_kernel else y return y def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 5 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(80)](buf1, primals_2, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (20, 5, 1), 0 ), primals_1, primals_3 class Conv1dNew(nn.Conv1d): """ :param in_channels: Scalar :param out_channels: Scalar :param kernel_size: Scalar :param activation_fn: activation function :param drop_rate: Scalar. dropout rate :param stride: Scalar :param padding: padding type :param dilation: Scalar :param groups: Scalar :param bias: Boolean. :param bn: Boolean. whether it uses batch normalization """ def __init__(self, in_channels, out_channels, kernel_size, activation_fn=None, drop_rate=0.0, stride=1, padding='same', dilation=1, groups=1, bias=True, bn=False): self.activation_fn = activation_fn self.drop_rate = drop_rate if padding == 'same': padding = kernel_size // 2 * dilation self.even_kernel = not bool(kernel_size % 2) super(Conv1dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None self.batch_norm = nn.BatchNorm1d(out_channels, eps=0.001, momentum= 0.001) if bn else None def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AstraliteHeart/cookietts
Conv1d
false
7,746
[ "BSD-3-Clause" ]
25
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
https://github.com/AstraliteHeart/cookietts/tree/c871f5f7b5790656d5b57bcd9e63946a2da52f0f
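For an even kernel, 'same' padding of k // 2 on both sides yields one extra output step, which the final [:, :, :-1] slice trims away; a quick eager sketch (the length of 10 is illustrative):

import torch

conv = Conv1d(4, 4, kernel_size=4)       # even kernel, padding='same'
print(conv(torch.rand(2, 4, 10)).shape)  # torch.Size([2, 4, 10]) after the trailing-step trim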
Conv2d
import torch import torch.utils.data from torch import nn from torch.nn import Conv2d class Conv2d(nn.Conv2d): """ :param in_channels: Scalar :param out_channels: Scalar :param kernel_size: Scalar :param activation_fn: activation function :param drop_rate: Scalar. dropout rate :param stride: Scalar :param padding: padding type :param dilation: Scalar :param groups: Scalar. :param bias: Boolean. :param bn: Boolean. whether it uses batch normalization """ def __init__(self, in_channels, out_channels, kernel_size, activation_fn=None, drop_rate=0.0, stride=1, padding='same', dilation=1, groups=1, bias=True, bn=False): self.activation_fn = activation_fn self.drop_rate = drop_rate if padding == 'same': padding = kernel_size // 2 * dilation self.even_kernel = not bool(kernel_size % 2) super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None self.batch_norm = nn.BatchNorm2d(out_channels, eps=0.001, momentum= 0.001) if bn else None def forward(self, x): """ :param x: (N, C_in, T) Tensor. Returns: y: (N, C_out, T) Tensor. """ y = super(Conv2d, self).forward(x) y = self.batch_norm(y) if self.batch_norm is not None else y y = self.activation_fn(y) if self.activation_fn is not None else y y = self.drop_out(y) if self.drop_out is not None else y y = y[:, :, :-1] if self.even_kernel else y return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(400)](buf1, primals_2, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 5), (100, 25, 5, 1), 0 ), primals_1, primals_3 class Conv2dNew(nn.Conv2d): """ :param in_channels: Scalar :param out_channels: Scalar :param kernel_size: Scalar :param activation_fn: activation function :param drop_rate: Scalar. dropout rate :param stride: Scalar :param padding: padding type :param dilation: Scalar :param groups: Scalar. :param bias: Boolean. :param bn: Boolean. whether it uses batch normalization """ def __init__(self, in_channels, out_channels, kernel_size, activation_fn=None, drop_rate=0.0, stride=1, padding='same', dilation=1, groups=1, bias=True, bn=False): self.activation_fn = activation_fn self.drop_rate = drop_rate if padding == 'same': padding = kernel_size // 2 * dilation self.even_kernel = not bool(kernel_size % 2) super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.drop_out = nn.Dropout(drop_rate) if drop_rate > 0 else None self.batch_norm = nn.BatchNorm2d(out_channels, eps=0.001, momentum= 0.001) if bn else None def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AstraliteHeart/cookietts
Conv2d
false
7,747
[ "BSD-3-Clause" ]
25
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
https://github.com/AstraliteHeart/cookietts/tree/c871f5f7b5790656d5b57bcd9e63946a2da52f0f
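Note the asymmetry in this class's even-kernel trim: y[:, :, :-1] slices only dim 2, so the last spatial dim keeps its extra column, which is also why the compiled record above returns a (4, 4, 4, 5) view. A quick eager sketch (the 8x8 input is illustrative):

import torch

conv = Conv2d(4, 4, kernel_size=4)
y = conv(torch.rand(2, 4, 8, 8))
print(y.shape)  # torch.Size([2, 4, 8, 9]); only dim 2 is trimmed back to the input size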
PositionwiseFeedForward
import math import torch from torch import nn def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionwiseFeedForward(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, x): residual = x output = x.transpose(1, 2) output = self.w_2(gelu(self.w_1(output))) output = output.transpose(1, 2) output = self.dropout(output) output = self.layer_norm(output + residual) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_in': 4, 'd_hid': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_div_erf_mul_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865475 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 buf3 = buf0 del buf0 triton_poi_fused_add_convolution_div_erf_mul_1[grid(64)](buf2, primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(64)](buf5, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf5, primals_1, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf5, primals_1, buf6, buf7, primals_6, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf6 del buf7 del primals_7 return buf8, primals_1, primals_2, primals_4, primals_6, buf2, buf3, buf5 def gelu(x): """Implementation of the gelu activation function. 
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionwiseFeedForwardNew(nn.Module): """ A two-feed-forward-layer module """ def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Conv1d(d_in, d_hid, 1) self.w_2 = nn.Conv1d(d_hid, d_in, 1) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.w_1.weight primals_3 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ArrowLuo/GRACE
PositionwiseFeedForward
false
7,748
[ "Apache-2.0" ]
17
f27b500ba905685c03eee6d91d87adc9ef78b4d1
https://github.com/ArrowLuo/GRACE/tree/f27b500ba905685c03eee6d91d87adc9ef78b4d1
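A quick standalone check (not from the repo above; plain PyTorch, no Triton assumed) of the gap between the exact erf-based gelu defined in this record and the tanh approximation its docstring cites:

import math
import torch

def gelu_exact(x):
    # exact form used in the record: 0.5 * x * (1 + erf(x / sqrt(2)))
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    # OpenAI GPT variant cited in the record's docstring
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

x = torch.linspace(-4.0, 4.0, steps=101)
print((gelu_exact(x) - gelu_tanh(x)).abs().max())  # small but nonzero; the two variants are not bit-for-bit interchangeable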
Conv2d
from torch.nn import Module
import math
import torch
from torch.nn import functional as F
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _pair
import torch.nn.parallel


def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=1):
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    out_rows = (input_rows + stride[0] - 1) // stride[0]
    padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows)
    rows_odd = padding_rows % 2 != 0
    padding_cols = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows)
    cols_odd = padding_rows % 2 != 0
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride, padding=(padding_rows // 2, padding_cols // 2), dilation=dilation, groups=groups)


class _ConvNd(Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1.0 / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)


class Conv2d(_ConvNd):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, dilation=1, groups=1, bias=True):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias)

    def forward(self, input):
        return conv2d_same_padding(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torch.nn import functional as F import torch.utils.data from torch.nn.parameter import Parameter from torch.nn.functional import pad from torch.nn.modules import Module from torch.nn.modules.utils import _pair import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(400)](primals_3, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, buf0 def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=1): input_rows = input.size(2) filter_rows = weight.size(2) out_rows = (input_rows + stride[0] - 1) // stride[0] padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows) rows_odd = padding_rows % 2 != 0 padding_cols = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows) cols_odd = padding_rows % 2 != 0 if rows_odd or cols_odd: input = pad(input, [0, int(cols_odd), 0, int(rows_odd)]) return F.conv2d(input, weight, bias, stride, padding=(padding_rows // 2, padding_cols // 2), dilation=dilation, groups=groups) class _ConvNd(Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias): super(_ConvNd, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = 
out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.transposed = transposed self.output_padding = output_padding self.groups = groups if transposed: self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) else: self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): s = ( '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}' ) if self.padding != (0,) * len(self.padding): s += ', padding={padding}' if self.dilation != (1,) * len(self.dilation): s += ', dilation={dilation}' if self.output_padding != (0,) * len(self.output_padding): s += ', output_padding={output_padding}' if self.groups != 1: s += ', groups={groups}' if self.bias is None: s += ', bias=False' s += ')' return s.format(name=self.__class__.__name__, **self.__dict__) class Conv2dNew(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, dilation=1, groups=1, bias=True): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AvrilCheng/LidarStereoNet
Conv2d
false
7,749
[ "MIT" ]
27
96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
https://github.com/AvrilCheng/LidarStereoNet/tree/96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
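One detail worth flagging in the record above: conv2d_same_padding computes padding_cols and cols_odd from the row sizes (input.size(2), weight.size(2)), so it reproduces TensorFlow-style 'SAME' padding only for square inputs and kernels. A minimal per-dimension sketch of the intended arithmetic (hypothetical helper, not from the repo):

import torch
import torch.nn.functional as F

def same_pad_1d(in_size, k, stride=1, dilation=1):
    # total padding needed so that out_size == ceil(in_size / stride)
    out_size = (in_size + stride - 1) // stride
    total = max(0, (out_size - 1) * stride + (k - 1) * dilation + 1 - in_size)
    return total // 2, total - total // 2  # (before, after); the odd extra goes after

x = torch.randn(1, 1, 7, 9)  # deliberately non-square
top, bottom = same_pad_1d(7, 3)
left, right = same_pad_1d(9, 3)
y = F.conv2d(F.pad(x, [left, right, top, bottom]), torch.randn(1, 1, 3, 3))
print(y.shape)  # torch.Size([1, 1, 7, 9]): spatial size preserved per dimension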
SiLU
import torch
import torch.nn as nn


class SiLU(nn.Module):
    """export-friendly version of nn.SiLU()"""

    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SiLUNew(nn.Module):
    """export-friendly version of nn.SiLU()"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Arui66/YOLOX
SiLU
false
7,750
[ "Apache-2.0" ]
16
7ee17936db849600817d7de05269bfdfb1a0eb48
https://github.com/Arui66/YOLOX/tree/7ee17936db849600817d7de05269bfdfb1a0eb48
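A one-line standalone check (not from the repo) that the export-friendly form above matches the built-in activation:

import torch
import torch.nn as nn

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(x * torch.sigmoid(x), nn.SiLU()(x))  # SiLU(x) = x * sigmoid(x) by definition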
Auto_Encoder_Model
import torch
import torch.nn as nn
import torch.nn.functional as F


class Auto_Encoder_Model(nn.Module):

    def __init__(self):
        super(Auto_Encoder_Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3)
        self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1)

    def forward_pass(self, x):
        output = F.relu(self.conv1(x))
        output = self.max_pool1(output)
        output = F.relu(self.conv2(output))
        output = self.max_pool2(output)
        output = F.relu(self.conv3(output))
        return output

    def reconstruct_pass(self, x):
        output = F.relu(self.tran_conv1(x))
        output = F.relu(self.conv4(output))
        output = F.relu(self.tran_conv2(output))
        output = torch.sigmoid(self.conv5(output))
        return output

    def forward(self, x):
        output = self.forward_pass(x)
        output = self.reconstruct_pass(output)
        return output


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 
tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (1, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2, buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_2[grid(131072)](buf11, primals_9, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf12 = extern_kernels.convolution(buf11, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_2[grid(131072)](buf13, primals_11, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_0[grid(1048576)](buf15, primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_sigmoid_5[grid(16384)](buf17, primals_15, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf11, buf13, buf15, buf17) class Auto_Encoder_ModelNew(nn.Module): def __init__(self): super(Auto_Encoder_ModelNew, self).__init__() self.conv1 = nn.Conv2d(1, 64, padding=1, kernel_size=3) self.max_pool1 = nn.MaxPool2d(2) self.conv2 = nn.Conv2d(64, 32, padding=1, kernel_size=3) self.max_pool2 = nn.MaxPool2d(2) self.conv3 = nn.Conv2d(32, 16, padding=1, kernel_size=3) self.tran_conv1 = nn.ConvTranspose2d(16, 32, kernel_size=3, stride= 2, padding=1, output_padding=1) self.conv4 = nn.Conv2d(32, 32, kernel_size=3, padding=1) self.tran_conv2 = nn.ConvTranspose2d(32, 64, kernel_size=3, stride= 2, padding=1, output_padding=1) self.conv5 = nn.Conv2d(64, 1, kernel_size=3, padding=1) def forward_pass(self, x): output = F.relu(self.conv1(x)) output = self.max_pool1(output) output = F.relu(self.conv2(output)) output = self.max_pool2(output) output = F.relu(self.conv3(output)) return output def reconstruct_pass(self, x): output = F.relu(self.tran_conv1(x)) output = F.relu(self.conv4(output)) output = F.relu(self.tran_conv2(output)) output = torch.sigmoid(self.conv5(output)) return output def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.tran_conv1.weight primals_9 = self.tran_conv1.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_12 = self.tran_conv2.weight primals_13 = self.tran_conv2.bias primals_14 = self.conv5.weight primals_15 = self.conv5.bias primals_3 = input_0 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
Awenbocc/med-vqa
Auto_Encoder_Model
false
7,751
[ "MIT" ]
27
0cca6811e38cf54aff6a7cce3442296d07875e64
https://github.com/Awenbocc/med-vqa/tree/0cca6811e38cf54aff6a7cce3442296d07875e64
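A minimal standalone sketch (not from the repo) of the spatial bookkeeping in the model above: the two MaxPool2d(2) stages take 64 -> 32 -> 16, and each ConvTranspose2d(kernel_size=3, stride=2, padding=1, output_padding=1) inverts one halving via out = (in - 1) * stride - 2 * padding + kernel_size + output_padding:

import torch
import torch.nn as nn

tconv = nn.ConvTranspose2d(16, 32, kernel_size=3, stride=2, padding=1, output_padding=1)
print(tconv(torch.randn(1, 16, 16, 16)).shape)  # torch.Size([1, 32, 32, 32]); (16 - 1) * 2 - 2 + 3 + 1 = 32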
Attention_SEblock
import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention_SEblock(nn.Module):

    def __init__(self, channels, reduction, temperature):
        super(Attention_SEblock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(channels, channels // reduction)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(channels // reduction, 2)
        self.fc2.bias.data[0] = 0.1
        self.fc2.bias.data[1] = 2
        self.temperature = temperature
        self.channels = channels

    def forward(self, x):
        x = self.avg_pool(x).view(-1, self.channels)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = F.gumbel_softmax(x, tau=1, hard=True)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'reduction': 4, 'temperature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused__softmax_add_log_neg_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 2 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr1 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp14 = tl.load(in_ptr2 + (1 + 2 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tl_math.log(tmp4) tmp6 = -tmp5 tmp7 = tmp3 + tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp13 = tmp10 + tmp12 tmp15 = tl_math.log(tmp14) tmp16 = -tmp15 tmp17 = tmp13 + tmp16 tmp18 = tmp17 * tmp8 tmp19 = triton_helpers.maximum(tmp9, tmp18) tmp20 = tmp9 - tmp19 tmp21 = tmp20 * tmp8 tmp22 = tl_math.exp(tmp21) tmp23 = tmp18 - tmp19 tmp24 = tmp23 * tmp8 tmp25 = tl_math.exp(tmp24) tmp26 = tmp22 + tmp25 tl.store(out_ptr0 + x0, tmp19, xmask) tl.store(out_ptr1 + x0, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_add_log_neg_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = xindex // 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl_math.log(tmp3) tmp5 = -tmp4 tmp6 = tmp2 + tmp5 tmp7 = 1.0 tmp8 = tmp6 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp7 
tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(in_out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused_add_max_scatter_sub_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 x0 = xindex % 2 x2 = xindex tmp0 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp17 = x0 tmp18 = tmp16 == tmp17 tmp19 = 1.0 tmp20 = 0.0 tmp21 = tl.where(tmp18, tmp19, tmp20) tmp23 = tmp21 - tmp22 tmp24 = tmp23 + tmp22 tl.store(out_ptr0 + x2, tmp24, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (2, 1), (1, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_4, (1, 2), (1, 1 ), 0), out=buf4) buf5 = empty_strided_cuda((4, 2), (2, 1), torch.float32) buf6 = torch.ops.aten.exponential.default(buf5) del buf5 buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_log_neg_2[grid(4)](buf4, primals_5, buf7, buf8, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = buf4 del buf4 triton_poi_fused__softmax_add_log_neg_3[grid(8)](buf10, primals_5, buf7, buf8, buf9, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf8 del buf9 del primals_5 buf11 = buf7 del buf7 triton_poi_fused_add_max_scatter_sub_4[grid(8)](buf10, buf11, 8, XBLOCK=8, num_warps=1, num_stages=1) return buf11, reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), buf3, buf10, primals_4 class Attention_SEblockNew(nn.Module): def __init__(self, channels, reduction, temperature): super(Attention_SEblockNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Linear(channels, channels // reduction) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Linear(channels // reduction, 2) self.fc2.bias.data[0] = 0.1 self.fc2.bias.data[1] = 2 self.temperature = temperature self.channels = channels def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return 
output[0]
Andrew-Zhu/DyFPN
Attention_SEblock
false
7,752
[ "Apache-2.0" ]
32
a74463b59c4ce28253c2449a07c0f6692a0147a1
https://github.com/Andrew-Zhu/DyFPN/tree/a74463b59c4ce28253c2449a07c0f6692a0147a1
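Note that the forward above hard-codes tau=1 in F.gumbel_softmax, so the stored self.temperature is never used. A minimal standalone sketch (not from the repo) of the straight-through trick that hard=True applies: one-hot values in the forward pass, soft gradients in the backward pass:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 2, requires_grad=True)
y_soft = F.gumbel_softmax(logits, tau=1.0, hard=False)
index = y_soft.argmax(dim=-1, keepdim=True)
y_hard = torch.zeros_like(y_soft).scatter_(-1, index, 1.0)
y = y_hard - y_soft.detach() + y_soft  # exactly one-hot in value, but gradients flow through y_soft
print(y)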
Conv3d
from torch.nn import Module
import math
import torch
from torch.nn import functional as F
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
from torch.nn.modules.utils import _triple
import torch.nn.parallel


def conv3d_same_padding(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=1):
    input_rows = input.size(3)
    filter_rows = weight.size(3)
    out_rows = (input_rows + stride[0] - 1) // stride[0]
    padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows)
    rows_odd = padding_rows % 2 != 0
    padding_cols = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows)
    cols_odd = padding_rows % 2 != 0
    padding_d = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows)
    d_odd = padding_rows % 2 != 0
    if rows_odd or cols_odd or d_odd:
        input = pad(input, [0, int(d_odd), 0, int(cols_odd), 0, int(rows_odd)])
    return F.conv3d(input, weight, bias, stride, padding=(padding_d // 2, padding_rows // 2, padding_cols // 2), dilation=dilation, groups=groups)


class _ConvNd(Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNd, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if transposed:
            self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size))
        else:
            self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1.0 / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)


class Conv3d(_ConvNd):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        super(Conv3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _triple(0), groups, bias)

    def forward(self, input):
        return conv3d_same_padding(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import math from torch.nn import functional as F import torch.utils.data from torch.nn.parameter import Parameter from torch.nn.functional import pad from torch.nn.modules import Module from torch.nn.modules.utils import _triple import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 500 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 25 % 5 x1 = xindex // 5 % 5 x0 = xindex % 5 x3 = xindex // 125 x4 = xindex tmp0 = x2 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x1 tmp4 = tmp3 < tmp1 tmp5 = x0 tmp6 = tmp5 < tmp1 tmp7 = tmp2 & tmp4 tmp8 = tmp7 & tmp6 tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * x3), tmp8 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 5, 5), (125, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(500)](primals_3, buf0, 500, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 5, 5, 5), (0, 125, 25, 5, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(buf0, (1, 4, 5, 5, 5), (500, 125, 25, 5, 1), 0) def conv3d_same_padding(input, weight, bias=None, stride=1, padding=1, dilation=1, groups=1): input_rows = input.size(3) filter_rows = weight.size(3) out_rows = (input_rows + stride[0] - 1) // stride[0] padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows) rows_odd = padding_rows % 2 != 0 padding_cols = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows) cols_odd = padding_rows % 2 != 0 padding_d = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) * dilation[0] + 1 - input_rows) d_odd = padding_rows % 2 != 0 if rows_odd or cols_odd or d_odd: input = pad(input, [0, int(d_odd), 0, int(cols_odd), 0, int(rows_odd)]) return F.conv3d(input, weight, bias, stride, 
padding=(padding_d // 2, padding_rows // 2, padding_cols // 2), dilation=dilation, groups=groups ) class _ConvNd(Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias): super(_ConvNd, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.transposed = transposed self.output_padding = output_padding self.groups = groups if transposed: self.weight = Parameter(torch.Tensor(in_channels, out_channels // groups, *kernel_size)) else: self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *kernel_size)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): s = ( '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}' ) if self.padding != (0,) * len(self.padding): s += ', padding={padding}' if self.dilation != (1,) * len(self.dilation): s += ', dilation={dilation}' if self.output_padding != (0,) * len(self.output_padding): s += ', output_padding={output_padding}' if self.groups != 1: s += ', groups={groups}' if self.bias is None: s += ', bias=False' s += ')' return s.format(name=self.__class__.__name__, **self.__dict__) class Conv3dNew(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): kernel_size = _triple(kernel_size) stride = _triple(stride) padding = _triple(padding) dilation = _triple(dilation) super(Conv3dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _triple(0), groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AvrilCheng/LidarStereoNet
Conv3d
false
7,753
[ "MIT" ]
27
96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
https://github.com/AvrilCheng/LidarStereoNet/tree/96c7cd6d5edb9b2fd302e2edd0c05cbda1ed024e
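As in the 2D record earlier, conv3d_same_padding above derives all three padding amounts from input.size(3) and weight.size(3), so it assumes cubic inputs and kernels. A minimal standalone reminder (not from the repo) of F.pad's dimension ordering for 5D tensors, last dimension first, which is how the helper pushes the odd extra pad to one side:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4, 4)
y = F.pad(x, [0, 1, 0, 1, 0, 1])  # [W_left, W_right, H_left, H_right, D_left, D_right]
print(y.shape)  # torch.Size([1, 1, 5, 5, 5])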
ShiftedSoftplus
import torch
import torch.nn.functional as F


class ShiftedSoftplus(torch.nn.Module):

    def __init__(self):
        super(ShiftedSoftplus, self).__init__()
        self.shift = torch.log(torch.tensor(2.0)).item()

    def forward(self, x):
        return F.softplus(x) - self.shift


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = 0.6931471824645996
    tmp7 = tmp5 - tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ShiftedSoftplusNew(torch.nn.Module):

    def __init__(self):
        super(ShiftedSoftplusNew, self).__init__()
        self.shift = torch.log(torch.tensor(2.0)).item()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
BaratiLab/AugLiChem
ShiftedSoftplus
false
7,754
[ "MIT" ]
16
37258b5ce2c653436b3e819b58d2659052d6edcc
https://github.com/BaratiLab/AugLiChem/tree/37258b5ce2c653436b3e819b58d2659052d6edcc
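A minimal standalone check (not from the repo) of what the shift buys: softplus(0) = log 2, so subtracting it makes the activation pass through the origin, and the constant 0.6931471824645996 in the fused kernel above is just log(2) in float32:

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, 7)
out = F.softplus(x) - torch.log(torch.tensor(2.0))
print(out[3])  # ~tensor(0.) at x = 0
assert torch.allclose(out, torch.log((1 + x.exp()) / 2))  # softplus(x) - log 2 = log((1 + e^x) / 2)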
Upsample
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F


class Upsample(nn.Module):

    def __init__(self, scale_factor=2, size=None):
        super(Upsample, self).__init__()
        self.upsample = F.upsample_nearest
        self.size = size
        self.scale_factor = scale_factor

    def forward(self, x):
        x = self.upsample(x, size=self.size, scale_factor=self.scale_factor)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 8 % 8
    x0 = xindex % 8
    x2 = xindex // 64
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class UpsampleNew(nn.Module):

    def __init__(self, scale_factor=2, size=None):
        super(UpsampleNew, self).__init__()
        self.upsample = F.upsample_nearest
        self.size = size
        self.scale_factor = scale_factor

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Bhaskers-Blu-Org1/gfmn
Upsample
false
7,756
[ "Apache-2.0" ]
15
52b4fd005f8c52297bd6aa5d93e4a1c8d46f56ce
https://github.com/Bhaskers-Blu-Org1/gfmn/tree/52b4fd005f8c52297bd6aa5d93e4a1c8d46f56ce
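F.upsample_nearest is long deprecated in favour of F.interpolate(mode='nearest'). A minimal standalone check (not from the repo) of the index rule the fused kernel above hard-codes for scale_factor=2, namely out[..., i, j] = in[..., i // 2, j // 2] (the kernel's multiply-by-0.5-then-truncate):

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
y = F.interpolate(x, scale_factor=2, mode='nearest')
i = torch.arange(8)
assert torch.equal(y, x[..., i[:, None] // 2, i[None, :] // 2])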
ChamferLoss
import torch
import torch.nn as nn


def batch_pairwise_dist(x, y):
    _bs, num_points_x, _points_dim = x.size()
    _, num_points_y, _ = y.size()
    xx = torch.bmm(x, x.transpose(2, 1))
    yy = torch.bmm(y, y.transpose(2, 1))
    zz = torch.bmm(x, y.transpose(2, 1))
    diag_ind_x = torch.arange(0, num_points_x)
    diag_ind_y = torch.arange(0, num_points_y)
    if x.get_device() != -1:
        diag_ind_x = diag_ind_x
        diag_ind_y = diag_ind_y
    rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.transpose(2, 1))
    ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
    P = rx.transpose(2, 1) + ry - 2 * zz
    return P


class ChamferLoss(nn.Module):

    def __init__(self):
        super(ChamferLoss, self).__init__()
        self.use_cuda = torch.cuda.is_available()

    def forward(self, preds, gts):
        P = batch_pairwise_dist(gts, preds)
        mins, _ = torch.min(P, 1)
        loss_1 = torch.sum(mins)
        mins, _ = torch.min(P, 2)
        loss_2 = torch.sum(mins)
        return loss_1 + loss_2


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_min_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 r0 = rindex % 4 r2 = rindex tmp0 = tl.load(in_ptr0 + 16 * r1, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (5 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (r0 + 16 * r1), None) tmp7 = tl.load(in_ptr0 + (5 + 16 * r1), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None) tmp13 = tl.load(in_ptr0 + (10 + 16 * r1), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None) tmp19 = tl.load(in_ptr0 + (15 + 16 * r1), None, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None) tmp28 = tl.load(in_ptr0 + (5 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr1 + 16 * r1, None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + 4 * r2, None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr1 + (5 + 16 * r1), None, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr2 + (1 + 4 * r2), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr1 + (10 + 16 * r1), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr2 + (2 + 4 * r2), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr1 + (15 + 16 * r1), None, eviction_policy= 'evict_last') tmp48 = tl.load(in_ptr2 + (3 + 4 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp2 - tmp5 tmp8 = tmp7 + tmp1 tmp10 = tmp9 * tmp4 tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.minimum(tmp6, tmp11) tmp14 = tmp13 + tmp1 tmp16 = tmp15 * tmp4 tmp17 = tmp14 - tmp16 tmp18 = triton_helpers.minimum(tmp12, tmp17) tmp20 = tmp19 + tmp1 tmp22 = tmp21 * tmp4 tmp23 = tmp20 - tmp22 tmp24 = triton_helpers.minimum(tmp18, tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp30 = tmp28 + tmp29 tmp32 = tmp31 * tmp4 tmp33 = tmp30 - tmp32 tmp35 = tmp28 + tmp34 tmp37 = tmp36 * tmp4 tmp38 = tmp35 - tmp37 tmp39 = triton_helpers.minimum(tmp33, tmp38) tmp41 = tmp28 + tmp40 tmp43 = tmp42 * tmp4 tmp44 = tmp41 - tmp43 tmp45 = triton_helpers.minimum(tmp39, tmp44) tmp47 = tmp28 + tmp46 tmp49 = tmp48 * tmp4 tmp50 = tmp47 - tmp49 tmp51 = triton_helpers.minimum(tmp45, tmp50) tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK]) tmp54 = tl.sum(tmp52, 1)[:, None] tmp55 = tmp27 + tmp54 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp55, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, 
reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf2) del arg0_1 del arg1_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 get_raw_stream(0) triton_per_fused_add_min_mul_sub_sum_0[grid(1)](buf7, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf7, def batch_pairwise_dist(x, y): _bs, num_points_x, _points_dim = x.size() _, num_points_y, _ = y.size() xx = torch.bmm(x, x.transpose(2, 1)) yy = torch.bmm(y, y.transpose(2, 1)) zz = torch.bmm(x, y.transpose(2, 1)) diag_ind_x = torch.arange(0, num_points_x) diag_ind_y = torch.arange(0, num_points_y) if x.get_device() != -1: diag_ind_x = diag_ind_x diag_ind_y = diag_ind_y rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(zz.transpose( 2, 1)) ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz) P = rx.transpose(2, 1) + ry - 2 * zz return P class ChamferLossNew(nn.Module): def __init__(self): super(ChamferLossNew, self).__init__() self.use_cuda = torch.cuda.is_available() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
AnTao97/UnsupervisedPointCloudSegmentation
ChamferLoss
false
7,757
[ "MIT" ]
13
9bcf0bdf3b1ae62421d9202eb7c0b014d6a69c02
https://github.com/AnTao97/UnsupervisedPointCloudSegmentation/tree/9bcf0bdf3b1ae62421d9202eb7c0b014d6a69c02
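A minimal standalone check (not from the repo) of the expansion batch_pairwise_dist builds on, ||x_i - y_j||^2 = <x_i, x_i> + <y_j, y_j> - 2 <x_i, y_j>, verified against torch.cdist:

import torch

x, y = torch.rand(4, 5, 3), torch.rand(4, 6, 3)
xx = (x * x).sum(-1)[:, :, None]   # (B, Nx, 1)
yy = (y * y).sum(-1)[:, None, :]   # (B, 1, Ny)
P = xx + yy - 2 * torch.bmm(x, y.transpose(2, 1))
assert torch.allclose(P, torch.cdist(x, y) ** 2, atol=1e-5)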
Step
import torch
import torch.nn as nn


class StepF(torch.autograd.Function):
    """
    A step function that returns values in {-1, 1} and uses the
    Straight-Through Estimator to update upstream weights in the network
    """

    @staticmethod
    def forward(ctx, input_):
        ctx.save_for_backward(input_)
        output = torch.sign(input_).clamp(min=0) * 2 - 1
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_, = ctx.saved_tensors
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output
        return grad_input


class Step(nn.Module):
    """Module wrapper for a step function (StepF)."""

    def __init__(self):
        super(Step, self).__init__()

    def __repr__(self):
        s = '{name}(low=-1, high=1)'
        return s.format(name=self.__class__.__name__)

    def forward(self, x):
        return StepF.apply(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_mul_sign_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp1 < tmp0
    tmp3 = tmp2.to(tl.int8)
    tmp4 = tmp0 < tmp1
    tmp5 = tmp4.to(tl.int8)
    tmp6 = tmp3 - tmp5
    tmp7 = tmp6.to(tmp0.dtype)
    tmp8 = 0.0
    tmp9 = triton_helpers.maximum(tmp7, tmp8)
    tmp10 = 2.0
    tmp11 = tmp9 * tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_mul_sign_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class StepF(torch.autograd.Function):
    """
    A step function that returns values in {-1, 1} and uses the
    Straight-Through Estimator to update upstream weights in the network
    """

    @staticmethod
    def forward(ctx, input_):
        ctx.save_for_backward(input_)
        output = torch.sign(input_).clamp(min=0) * 2 - 1
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_, = ctx.saved_tensors
        grad_input = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output
        return grad_input


class StepNew(nn.Module):
    """Module wrapper for a step function (StepF)."""

    def __init__(self):
        super(StepNew, self).__init__()

    def __repr__(self):
        s = '{name}(low=-1, high=1)'
        return s.format(name=self.__class__.__name__)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Bhaskers-Blu-Org1/online-alt-min
Step
false
7,758
[ "Apache-2.0" ]
23
ef31aaad639c0880df8700d34613164298bcadd0
https://github.com/Bhaskers-Blu-Org1/online-alt-min/tree/ef31aaad639c0880df8700d34613164298bcadd0
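A minimal standalone demo (not from the repo, reusing StepF from the record above) of the straight-through behaviour: a hard {-1, 1} step in the forward pass, identity gradient in the backward pass:

import torch

x = torch.tensor([-0.5, 0.0, 2.0], requires_grad=True)
y = StepF.apply(x)
print(y)        # tensor([-1., -1.,  1.], ...); note that sign(0) = 0 lands on -1
y.sum().backward()
print(x.grad)   # tensor([1., 1., 1.]): upstream gradients pass through unchanged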
BilinearAttention
import torch
import torch.utils.data
from torch import nn


class BilinearAttention(nn.Module):
    """
    :param enc_dim: Scalar.
    :param dec_dim: Scalar
    """

    def __init__(self, enc_dim, dec_dim):
        super(BilinearAttention, self).__init__()
        self.W = nn.Linear(enc_dim, dec_dim)

    def forward(self, h, s):
        """
        :param h: (N, Tx, Cx) Tensor. Encoder outputs
        :param s: (N, Ty/r, Cx) Tensor. Decoder inputs (previous decoder outputs)

        Returns:
            A: (N, Ty/r, Tx) Tensor. attention
        """
        wh = self.W(h)
        e = torch.matmul(wh, s.transpose(1, 2))
        A = torch.softmax(e.transpose(1, 2), dim=-1)
        return A


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'enc_dim': 4, 'dec_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x5 = xindex // 4 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_4, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 
0), out=buf2) buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0) class BilinearAttentionNew(nn.Module): """ :param enc_dim: Scalar. :param dec_dim: Scalar """ def __init__(self, enc_dim, dec_dim): super(BilinearAttentionNew, self).__init__() self.W = nn.Linear(enc_dim, dec_dim) def forward(self, input_0, input_1): primals_1 = self.W.weight primals_2 = self.W.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AstraliteHeart/cookietts
BilinearAttention
false
7,759
[ "BSD-3-Clause" ]
25
c871f5f7b5790656d5b57bcd9e63946a2da52f0f
https://github.com/AstraliteHeart/cookietts/tree/c871f5f7b5790656d5b57bcd9e63946a2da52f0f
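A minimal eager-mode usage sketch for the BilinearAttention module above, assuming the class definition is in scope; the 4D shapes mirror get_inputs() and are illustrative only (the compiled variant additionally needs a CUDA device):

import torch

attn = BilinearAttention(enc_dim=4, dec_dim=4)
h = torch.rand(4, 4, 4, 4)  # encoder outputs
s = torch.rand(4, 4, 4, 4)  # previous decoder outputs
A = attn(h, s)
print(A.shape)  # torch.Size([4, 4, 4, 4])
# softmax is taken over the last axis, so each attention row sums to ~1
print(torch.allclose(A.sum(-1), torch.ones(4, 4, 4)))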
InnerProductLoss
import torch import torch.nn as nn import torch.nn.functional as F class InnerProductLoss(nn.Module): """This is the inner-product loss used in CFKG for optimization. """ def __init__(self): super(InnerProductLoss, self).__init__() def forward(self, anchor, positive, negative): pos_score = torch.mul(anchor, positive).sum(dim=1) neg_score = torch.mul(anchor, negative).sum(dim=1) return (F.softplus(-pos_score) + F.softplus(neg_score)).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp18 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp21 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -tmp14 tmp17 = tmp0 * tmp16 tmp19 = tmp3 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp7 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp11 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = 20.0 tmp28 = tmp15 > tmp27 tmp29 = tl_math.exp(tmp15) tmp30 = libdevice.log1p(tmp29) tmp31 = tl.where(tmp28, tmp15, tmp30) tmp32 = tmp26 > tmp27 tmp33 = tl_math.exp(tmp26) tmp34 = libdevice.log1p(tmp33) tmp35 = tl.where(tmp32, tmp26, tmp34) tmp36 = tmp31 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_sum_0[grid(1)](buf3, arg1_1, arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf3, class InnerProductLossNew(nn.Module): """This is the inner-product loss used in CFKG for optimization. """ def __init__(self): super(InnerProductLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
BELIEVEfxy/LightSANs
InnerProductLoss
false
7,760
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
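A quick sketch of InnerProductLoss in eager mode, assuming the class above is in scope; batch and embedding sizes are arbitrary:

import torch

loss_fn = InnerProductLoss()
anchor = torch.rand(8, 16)
positive = torch.rand(8, 16)
negative = torch.rand(8, 16)
# softplus(-<anchor, pos>) + softplus(<anchor, neg>), averaged over the batch
loss = loss_fn(anchor, positive, negative)
print(loss)  # scalar tensor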
Conv1d2Score
import torch import torch.nn as nn import torch.optim import torch.utils.data class Conv1d2Score(nn.Module): """Calculate a N*out_dim tensor from N*in_dim*seq_len using nn.Conv1d Essentially it is a linear layer Args: in_dim: int out_dim: int, usually number of classes seq_len: int Shape: - Input: N*in_dim*seq_len - Output: N*out_dim Attributes: weight (Tensor): the learnable weights of the module of shape out_channels (out_dim) * in_channels (in_dim) * kernel_size (seq_len) bias (Tensor): shape: out_channels (out_dim) Examples:: >>> x = torch.randn(2, 3, 4, device=device) >>> model = Conv1d2Score(3, 5, 4) >>> model(x).shape """ def __init__(self, in_dim, out_dim, seq_len, bias=True): super(Conv1d2Score, self).__init__() self.conv = nn.Conv1d(in_dim, out_dim, kernel_size=seq_len, bias=bias) def forward(self, x): out = self.conv(x).squeeze(-1) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'seq_len': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1), (4, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(4)](buf1, primals_2, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4,), (1,), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class Conv1d2ScoreNew(nn.Module): """Calculate a N*out_dim tensor from N*in_dim*seq_len using nn.Conv1d Essentially it is a linear layer Args: in_dim: int out_dim: int, usually number of classes seq_len: int Shape: - Input: N*in_dim*seq_len - Output: N*out_dim Attributes: weight (Tensor): the learnable weights of the module of shape out_channels (out_dim) * in_channels (in_dim) * kernel_size (seq_len) bias (Tensor): shape: out_channels (out_dim) Examples:: >>> x = torch.randn(2, 3, 4, device=device) >>> model = Conv1d2Score(3, 5, 4) >>> model(x).shape """ def __init__(self, in_dim, out_dim, seq_len, bias=True): super(Conv1d2ScoreNew, self).__init__() self.conv = nn.Conv1d(in_dim, out_dim, kernel_size=seq_len, bias=bias) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
BeautyOfWeb/VIN
Conv1d2Score
false
7,761
[ "MIT" ]
34
53343d28130f5fd6e5badb58daf8079a5933fd6a
https://github.com/BeautyOfWeb/VIN/tree/53343d28130f5fd6e5badb58daf8079a5933fd6a
IOUloss
import torch import torch.nn as nn class IOUloss(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUloss, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, pred, target): assert pred.shape[0] == target.shape[0] pred = pred.view(-1, 4) target = target.view(-1, 4) tl = torch.max(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) br = torch.min(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_p = torch.prod(pred[:, 2:], 1) area_g = torch.prod(target[:, 2:], 1) en = (tl < br).type(tl.type()).prod(dim=1) area_i = torch.prod(br - tl, 1) * en area_u = area_p + area_g - area_i iou = area_i / (area_u + 1e-16) if self.loss_type == 'iou': loss = 1 - iou ** 2 elif self.loss_type == 'giou': c_tl = torch.min(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2) c_br = torch.max(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2) area_c = torch.prod(c_br - c_tl, 1) giou = iou - (area_c - area_u) / area_c.clamp(1e-16) loss = 1 - giou.clamp(min=-1.0, max=1.0) if self.reduction == 'mean': loss = loss.mean() elif self.reduction == 'sum': loss = loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.minimum(tmp4, tmp8) tmp10 = tmp0 - tmp3 tmp11 = tmp5 - tmp7 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp9 - tmp12 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = triton_helpers.minimum(tmp17, tmp21) tmp23 = tmp14 - tmp16 tmp24 = tmp18 - tmp20 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tmp22 - tmp25 tmp27 = tmp13 * tmp26 tmp28 = tmp12 < tmp9 tmp29 = tmp28.to(tl.float32) tmp30 = tmp25 < tmp22 tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 * tmp31 tmp33 = tmp27 * tmp32 tmp34 = tmp1 * tmp15 tmp35 = tmp6 * tmp19 tmp36 = tmp34 + tmp35 tmp37 = tmp36 - tmp33 tmp38 = 1e-16 tmp39 = tmp37 + tmp38 tmp40 = tmp33 / tmp39 tmp41 = tmp40 * tmp40 tmp42 = 1.0 tmp43 = tmp42 - tmp41 tl.store(in_out_ptr0 + x0, tmp43, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__to_copy_add_div_lt_maximum_minimum_mul_pow_prod_rsub_sub_0[ grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class IOUlossNew(nn.Module): def __init__(self, reduction='none', loss_type='iou'): super(IOUlossNew, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Arui66/YOLOX
IOUloss
false
7,762
[ "Apache-2.0" ]
16
7ee17936db849600817d7de05269bfdfb1a0eb48
https://github.com/Arui66/YOLOX/tree/7ee17936db849600817d7de05269bfdfb1a0eb48
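A small sanity check for IOUloss, assuming the class above is in scope; boxes follow the (cx, cy, w, h) convention used in forward:

import torch

crit = IOUloss(reduction='mean', loss_type='iou')
boxes = torch.tensor([[2.0, 3.0, 4.0, 4.0]])            # one (cx, cy, w, h) box
print(crit(boxes, boxes))                                # ~0.0: identical boxes, IoU ~1
shifted = boxes + torch.tensor([[10.0, 0.0, 0.0, 0.0]])  # move the center far away
print(crit(boxes, shifted))                              # ~1.0: disjoint boxes, IoU 0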
WeightedView
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data


class WeightedView(nn.Module):
    """Calculate weighted view

    Args:
        num_groups: int, number of groups (views)
        reduce_dimension: bool, default False. If True, reduce dimension dim
        dim: default -1. Only used when reduce_dimension is True

    Shape:
        - Input: if dim is None, (N, num_features*num_groups)
        - Output: (N, num_features)

    Attributes:
        weight: (num_groups)

    Examples:

        >>> model = WeightedView(3)
        >>> x = torch.randn(1, 6)
        >>> print(model(x))
        >>> model = WeightedView(3, True, 1)
        >>> model(x.view(1,3,2))
    """

    def __init__(self, num_groups, reduce_dimension=False, dim=-1):
        super(WeightedView, self).__init__()
        self.num_groups = num_groups
        self.reduce_dimension = reduce_dimension
        self.dim = dim
        self.weight = nn.Parameter(torch.Tensor(num_groups))
        self.weight.data.uniform_(-1.0 / num_groups, 1.0 / num_groups)

    def forward(self, x):
        self.normalized_weight = nn.functional.softmax(self.weight, dim=0)
        if self.reduce_dimension:
            assert x.size(self.dim) == self.num_groups
            dim = self.dim if self.dim >= 0 else self.dim + x.dim()
            if dim == x.dim() - 1:
                # use the softmax-normalized weights here as well; the raw
                # self.weight in the original looks like an oversight
                out = (x * self.normalized_weight).sum(-1)
            else:
                out = torch.transpose((x.transpose(dim, -1) * self.
                    normalized_weight).sum(-1), dim, -1)
        else:
            assert x.dim() == 2
            num_features = x.size(-1) // self.num_groups
            out = (x.view(-1, self.num_groups, num_features).transpose(1, -
                1) * self.normalized_weight).sum(-1)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tmp1 - tmp1 tmp3 = tl_math.exp(tmp2) tmp4 = tmp3 / tmp3 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(1)](primals_1, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(16)](primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, buf0, primals_2, buf0 class WeightedViewNew(nn.Module): """Calculate weighted view Args: num_groups: int, number of groups (views) reduce_dimension: bool, default False. If True, reduce dimension dim dim: default -1. Only used when reduce_dimension is True Shape: - Input: if dim is None, (N, num_features*num_groups) - Output: (N, num_features) Attributes: weight: (num_groups) Examples: >>> model = WeightedView(3) >>> x = Variable(torch.randn(1, 6)) >>> print(model(x)) >>> model = WeightedView(3, True, 1) >>> model(x.view(1,3,2)) """ def __init__(self, num_groups, reduce_dimension=False, dim=-1): super(WeightedViewNew, self).__init__() self.num_groups = num_groups self.reduce_dimension = reduce_dimension self.dim = dim self.weight = nn.Parameter(torch.Tensor(num_groups)) self.weight.data.uniform_(-1.0 / num_groups, 1.0 / num_groups) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
BeautyOfWeb/AffinityNet
WeightedView
false
7,763
[ "MIT" ]
34
d3f79823fa0182328894483165d4f0853740ee53
https://github.com/BeautyOfWeb/AffinityNet/tree/d3f79823fa0182328894483165d4f0853740ee53
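A sketch exercising both branches of WeightedView, assuming the class above is in scope; shapes are illustrative:

import torch

m = WeightedView(num_groups=3)
x = torch.randn(2, 6)                 # 3 groups of 2 features each
print(m(x).shape)                     # torch.Size([2, 2])

m2 = WeightedView(num_groups=3, reduce_dimension=True, dim=1)
print(m2(x.view(2, 3, 1, 2)).shape)   # torch.Size([2, 1, 2]): dim 1 reduced away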
LearnedUpsampling1d
import torch from torch import nn class LearnedUpsampling1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.conv_t = nn.ConvTranspose1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= kernel_size, bias=False) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_channels, kernel_size)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): self.conv_t.reset_parameters() nn.init.constant(self.bias, 0) def forward(self, input): batch_size, _, length = input.size() kernel_size, = self.conv_t.kernel_size bias = self.bias.unsqueeze(0).unsqueeze(2).expand(batch_size, self. conv_t.out_channels, length, kernel_size).contiguous().view( batch_size, self.conv_t.out_channels, length * kernel_size) return self.conv_t(input) + bias def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_3, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0 ,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class LearnedUpsampling1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=True): super().__init__() self.conv_t = nn.ConvTranspose1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= kernel_size, bias=False) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_channels, kernel_size)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): self.conv_t.reset_parameters() nn.init.constant(self.bias, 0) def forward(self, input_0): primals_2 = self.bias primals_1 = self.conv_t.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Barbany/Multi-speaker-Neural-Vocoder
LearnedUpsampling1d
false
7,764
[ "MIT" ]
13
a3f5c266603b17bcbe264e750947140f302272c8
https://github.com/Barbany/Multi-speaker-Neural-Vocoder/tree/a3f5c266603b17bcbe264e750947140f302272c8
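A shape check for LearnedUpsampling1d, assuming the class above is in scope. Since stride equals kernel_size, the output length is the input length times kernel_size; note that reset_parameters calls the deprecated nn.init.constant alias of nn.init.constant_, so construction may emit a warning:

import torch

up = LearnedUpsampling1d(in_channels=4, out_channels=8, kernel_size=5)
x = torch.randn(2, 4, 10)
y = up(x)
print(y.shape)  # torch.Size([2, 8, 50]), i.e. length 10 upsampled by a factor of 5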
InnerProductLayer
import torch
import torch.nn as nn


class InnerProductLayer(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise
    product or inner product between feature vectors.

    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int) :number of feature fields.
            device(torch.device) : device object of the model.
        """
        super(InnerProductLayer, self).__init__()
        self.num_feature_field = num_feature_field
        self

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor) :3D tensor with shape: [batch_size, num_feature_field, embedding_size].

        Returns:
            inner_product(torch.FloatTensor): The inner product of input tensor. shape of [batch_size, num_pairs]
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        inner_product = p * q
        return inner_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feature_field': 4, 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x0 = xindex % 4 x2 = xindex // 24 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last') tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last') tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last') tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last') tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last') tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + x3, tmp36, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class InnerProductLayerNew(nn.Module): """InnerProduct Layer used in PNN that compute the element-wise product or inner product between feature vectors. """ def __init__(self, num_feature_field, device): """ Args: num_feature_field(int) :number of feature fields. device(torch.device) : device object of the model. """ super(InnerProductLayerNew, self).__init__() self.num_feature_field = num_feature_field self def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BELIEVEfxy/LightSANs
InnerProductLayer
false
7,765
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
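A usage sketch for InnerProductLayer, assuming the class above is in scope; the device argument is never stored (note the bare self statement in __init__), so 'cpu' below is just a placeholder:

import torch

layer = InnerProductLayer(num_feature_field=4, device='cpu')
feat_emb = torch.rand(2, 4, 8)   # [batch_size, num_fields, embedding_size]
out = layer(feat_emb)
print(out.shape)                 # torch.Size([2, 6]): one score per field pair, C(4, 2) = 6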
ConvLSTMCell
import torch import torch.nn as nn class ConvLSTMCell(nn.Module): """ Implementation of the Basic ConvLSTM. No peephole connection, no forget gate. ConvLSTM: x - input h - hidden representation c - memory cell f - forget gate o - output gate Reference:Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting """ def __init__(self, input_channels, hidden_channels, kernel_size): super(ConvLSTMCell, self).__init__() self.input_channels = input_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.num_features = 4 self.padding = int((kernel_size - 1) / 2) self.W_i = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_f = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_o = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_c = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.reset_parameters() def forward(self, inputs, c): i_t = torch.sigmoid(self.W_i(inputs)) f_t = torch.sigmoid(self.W_f(inputs)) o_t = torch.sigmoid(self.W_o(inputs)) c_t = f_t * c + i_t * torch.tanh(self.W_c(inputs)) h_t = o_t * torch.tanh(c_t) return h_t, c_t def reset_parameters(self): self.W_i.reset_parameters() self.W_f.reset_parameters() self.W_o.reset_parameters() self.W_c.reset_parameters() def get_inputs(): return [torch.rand([4, 4, 3, 3]), torch.rand([4, 4, 2, 2])] def get_init_inputs(): return [[], {'input_channels': 4, 'hidden_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_mul_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr2 + x3, xmask) tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_out_ptr3 + x3, xmask) tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x3, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp8 = tmp6 + tmp7 tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp5) tmp14 = tmp12 * tmp13 tmp15 = tl.sigmoid(tmp2) tmp16 = libdevice.tanh(tmp11) tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tl.sigmoid(tmp8) tmp20 = libdevice.tanh(tmp18) tmp21 = tmp19 * tmp20 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(in_out_ptr2 + x3, tmp8, xmask) tl.store(in_out_ptr3 + x3, tmp11, xmask) tl.store(out_ptr0 + x3, tmp18, xmask) tl.store(out_ptr1 + x3, tmp21, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1)) buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1)) buf6 = extern_kernels.convolution(primals_3, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 buf3 = buf2 del buf2 buf5 = buf4 del buf4 buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 2, 2), 
(16, 4, 2, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_convolution_mul_sigmoid_tanh_0[grid(64)](buf1, buf3, buf5, buf7, primals_2, primals_5, primals_7, primals_10, primals_8, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 del primals_2 del primals_5 del primals_7 return (buf9, buf8, primals_1, primals_3, primals_4, primals_6, primals_8, primals_9, buf1, buf3, buf5, buf7, buf8) class ConvLSTMCellNew(nn.Module): """ Implementation of the Basic ConvLSTM. No peephole connection, no forget gate. ConvLSTM: x - input h - hidden representation c - memory cell f - forget gate o - output gate Reference:Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting """ def __init__(self, input_channels, hidden_channels, kernel_size): super(ConvLSTMCellNew, self).__init__() self.input_channels = input_channels self.hidden_channels = hidden_channels self.kernel_size = kernel_size self.num_features = 4 self.padding = int((kernel_size - 1) / 2) self.W_i = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_f = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_o = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.W_c = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True) self.reset_parameters() def reset_parameters(self): self.W_i.reset_parameters() self.W_f.reset_parameters() self.W_o.reset_parameters() self.W_c.reset_parameters() def forward(self, input_0, input_1): primals_1 = self.W_i.weight primals_2 = self.W_i.bias primals_4 = self.W_f.weight primals_5 = self.W_f.bias primals_6 = self.W_o.weight primals_7 = self.W_o.bias primals_9 = self.W_c.weight primals_10 = self.W_c.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1]
BenQLange/AttentionAugmentedConvLSTM
ConvLSTMCell
false
7,766
[ "MIT" ]
30
d8419b7a628b02ac49e8450deb3d60450c7b2d6b
https://github.com/BenQLange/AttentionAugmentedConvLSTM/tree/d8419b7a628b02ac49e8450deb3d60450c7b2d6b
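A usage sketch for ConvLSTMCell, assuming the class above is in scope. An odd kernel_size preserves the spatial dimensions; the dataset row's kernel_size=4 shrinks them (3x3 in, 2x2 out):

import torch

cell = ConvLSTMCell(input_channels=3, hidden_channels=8, kernel_size=3)
x = torch.randn(2, 3, 16, 16)      # input frame
c = torch.zeros(2, 8, 16, 16)      # initial memory cell
h_t, c_t = cell(x, c)
print(h_t.shape, c_t.shape)        # torch.Size([2, 8, 16, 16]) for both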
BPRLoss
import torch import torch.nn as nn class BPRLoss(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLoss, self).__init__() self.gamma = gamma def forward(self, pos_score, neg_score): loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score) ).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-10 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_neg_sigmoid_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BPRLossNew(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLossNew, self).__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
BELIEVEfxy/LightSANs
BPRLoss
false
7,767
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
Policy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.autograd


class Policy(nn.Module):

    def __init__(self):
        super(Policy, self).__init__()
        self.affine1 = nn.Linear(4, 128)
        self.affine2 = nn.Linear(128, 2)
        self.saved_actions = []
        self.rewards = []

    def forward(self, x):
        x = F.relu(self.affine1(x))
        action_scores = self.affine2(x)
        # dim=1 matches the implicit-dim behavior for the 2D and 4D inputs
        # used here, and avoids the deprecation warning
        return F.softmax(action_scores, dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.optim import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 128), (128, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) 
get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 class PolicyNew(nn.Module): def __init__(self): super(PolicyNew, self).__init__() self.affine1 = nn.Linear(4, 128) self.affine2 = nn.Linear(128, 2) self.saved_actions = [] self.rewards = [] def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.affine2.weight primals_5 = self.affine2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BestSonny/examples
Policy
false
7,768
[ "BSD-3-Clause" ]
13
4b7365c0db22133d1793e53bb3674c2d0ebaeac1
https://github.com/BestSonny/examples/tree/4b7365c0db22133d1793e53bb3674c2d0ebaeac1
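A usage sketch for Policy, assuming the class above is in scope; the 4-in / 2-out sizes match the classic CartPole REINFORCE setup, which this module appears to be taken from:

import torch

policy = Policy()
state = torch.rand(1, 4)               # a 4-dimensional observation
probs = policy(state)
print(probs.shape, probs.sum(dim=1))   # torch.Size([1, 2]), sums to ~1.0
action = torch.multinomial(probs, num_samples=1)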
ConvNCFBPRLoss
import torch import torch.nn as nn class ConvNCFBPRLoss(nn.Module): """ ConvNCFBPRLoss, based on Bayesian Personalized Ranking, Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = ConvNCFBPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self): super(ConvNCFBPRLoss, self).__init__() def forward(self, pos_score, neg_score): distance = pos_score - neg_score loss = torch.sum(torch.log(1 + torch.exp(-distance))) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = -tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_exp_log_neg_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class ConvNCFBPRLossNew(nn.Module): """ ConvNCFBPRLoss, based on Bayesian Personalized Ranking, Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = ConvNCFBPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self): super(ConvNCFBPRLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
BELIEVEfxy/LightSANs
ConvNCFBPRLoss
false
7,769
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
AttLayer
import torch
import torch.nn as nn
import torch.nn.functional as fn


class AttLayer(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of input. shape of [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayer, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, infeatures):
        att_singal = self.w(infeatures)
        att_singal = fn.relu(att_singal)
        att_singal = torch.mul(att_singal, self.h)
        att_singal = torch.sum(att_singal, dim=2)
        att_singal = fn.softmax(att_singal, dim=1)
        return att_singal


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'att_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_relu_sum_0[grid(64)](buf0, primals_3, buf1,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf2
    return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
        ), buf0, buf3


class AttLayerNew(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of input. shape of [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayerNew, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, input_0):
        primals_3 = self.h
        primals_1 = self.w.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
BELIEVEfxy/LightSANs
AttLayer
false
7,770
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
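A usage sketch for AttLayer, assuming the class above is in scope; batch size, M, and dimensions are illustrative:

import torch

layer = AttLayer(in_dim=8, att_dim=16)
feats = torch.rand(2, 5, 8)     # [batch_size, M, embed_dim]
weights = layer(feats)
print(weights.shape)            # torch.Size([2, 5])
print(weights.sum(dim=1))       # ~1.0 per sample (softmax over the M axis)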
SpanClassifier
import torch import torch.nn as nn from torch.nn import BCELoss class SpanClassifier(nn.Module): """given the span embeddings, classify whether their relations""" def __init__(self, d_inp): super(SpanClassifier, self).__init__() self.d_inp = d_inp self.bilinear_layer = nn.Bilinear(d_inp, d_inp, 1) self.output = nn.Sigmoid() self.loss = BCELoss() def forward(self, span_emb_1, span_emb_2, label=None): """Calculate the similarity as bilinear product of span embeddings. Args: span_emb_1: [batch_size, hidden] (Tensor) hidden states for span_1 span_emb_2: [batch_size, hidden] (Tensor) hidden states for span_2 label: [batch_size] 0/1 Tensor, if none is supplied do prediction. """ similarity = self.bilinear_layer(span_emb_1, span_emb_2) probs = self.output(similarity) outputs = similarity, if label is not None: cur_loss = self.loss(probs, label) outputs = (cur_loss,) + outputs return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_inp': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import BCELoss assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(64)](buf2, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class SpanClassifierNew(nn.Module): """given the span embeddings, classify whether their relations""" def __init__(self, d_inp): super(SpanClassifierNew, self).__init__() self.d_inp = d_inp self.bilinear_layer = nn.Bilinear(d_inp, d_inp, 1) self.output = nn.Sigmoid() self.loss = BCELoss() def forward(self, input_0, input_1): primals_1 = self.bilinear_layer.weight primals_2 = self.bilinear_layer.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Bhaskers-Blu-Org1/superglue-mtl
SpanClassifier
false
7,771
[ "Apache-2.0" ]
15
1eb3e581c0ef3b4c261e0256ec26116d2b657c40
https://github.com/Bhaskers-Blu-Org1/superglue-mtl/tree/1eb3e581c0ef3b4c261e0256ec26116d2b657c40
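A usage sketch for SpanClassifier covering both the prediction and the training path, assuming the class above is in scope; sizes are illustrative:

import torch

clf = SpanClassifier(d_inp=8)
span1 = torch.rand(4, 8)
span2 = torch.rand(4, 8)
scores, = clf(span1, span2)                       # prediction: raw bilinear scores, shape (4, 1)
labels = torch.randint(0, 2, (4, 1)).float()
loss, scores = clf(span1, span2, label=labels)    # training: (BCE loss, scores)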
RegLoss
import torch import torch.nn as nn class RegLoss(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLoss, self).__init__() def forward(self, parameters): reg_loss = None for W in parameters: if reg_loss is None: reg_loss = W.norm(2) else: reg_loss = reg_loss + W.norm(2) return reg_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr0 + (64 + r0), None) tmp10 = tl.load(in_ptr0 + (128 + r0), None) tmp15 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = libdevice.sqrt(tmp4) tmp21 = libdevice.sqrt(tmp9) tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp14) tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp19) tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_0[grid(1)](buf4, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class RegLossNew(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BELIEVEfxy/LightSANs
RegLoss
false
7,772
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
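A minimal sketch of the original RegLoss above, assuming the class is in scope; the (4, 4, 4, 4) input mirrors get_inputs(), and iterating over it yields four (4, 4, 4) "parameter" slices whose L2 norms are summed:

import torch

reg = RegLoss()
params = torch.rand(4, 4, 4, 4)
loss = reg(params)  # scalar tensor: ||W_0||_2 + ||W_1||_2 + ||W_2||_2 + ||W_3||_2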
OuterProductLayer
import torch
import torch.nn as nn


class OuterProductLayer(nn.Module):
    """OuterProduct Layer used in PNN. This implementation is adapted from the
    code that the author of the paper published on https://github.com/Atomu2014/product-nets.
    """

    def __init__(self, num_feature_field, embedding_size, device):
        """
        Args:
            num_feature_field(int): number of feature fields.
            embedding_size(int): size of each embedding vector.
            device(torch.device): device object of the model.
        """
        super(OuterProductLayer, self).__init__()
        self.num_feature_field = num_feature_field
        num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
        embed_size = embedding_size
        self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
            embed_size), requires_grad=True)
        nn.init.xavier_uniform_(self.kernel)

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor): 3D tensor with shape [batch_size, num_feature_field, embedding_size].

        Returns:
            outer_product(torch.FloatTensor): the pairwise outer-product interactions, with shape [batch_size, num_pairs].
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        p.unsqueeze_(dim=1)
        p = torch.mul(p, self.kernel.unsqueeze(0))
        p = torch.sum(p, dim=-1)
        p = torch.transpose(p, 2, 1)
        outer_product = p * q
        return outer_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feature_field': 4, 'embedding_size': 4, 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp18 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp0 = x0 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (4 * tmp16 + 16 * x2), xmask, eviction_policy ='evict_last') tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr0 + (1 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp22 = tmp20 * tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.load(in_ptr0 + (2 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp24 * tmp25 tmp27 = tmp23 + tmp26 tmp28 = tl.load(in_ptr0 + (3 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp30 = tmp28 * tmp29 tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) @triton.jit def triton_poi_fused_index_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask) tmp19 = tl.load(in_ptr0 + (6 + x0 + 24 * x1), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask) tmp27 = tl.load(in_ptr0 + (18 + x0 + 24 * x1), xmask) tmp1 = x0 tmp2 = tl.full([1], 3, tl.int64) tmp3 = tmp1 < tmp2 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.full([1], 2, tl.int64) tmp7 = tmp1 < tmp6 tmp8 = tl.where(tmp7, tmp6, tmp2) tmp9 = tl.where(tmp5, tmp4, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp1 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp1 < tmp12 tmp14 = tl.where(tmp13, tmp2, tmp2) tmp15 = tl.where(tmp11, tmp6, tmp14) tmp16 = tl.where(tmp3, tmp9, tmp15) tmp17 = tl.load(in_ptr1 + (4 * tmp16 + 16 * x1), xmask, eviction_policy ='evict_last') tmp18 = tmp0 * tmp17 tmp20 = tl.load(in_ptr1 + (1 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp24 = tl.load(in_ptr1 + (2 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp28 = tl.load(in_ptr1 + (3 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp29 = tmp27 * tmp28 tmp30 = tmp26 + tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
    assert_size_stride(primals_2, (4, 6, 4), (24, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sum_0[grid(96)](primals_1, primals_2, buf0,
            96, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
        triton_poi_fused_index_mul_sum_1[grid(24)](buf0, primals_1, buf1,
            24, XBLOCK=32, num_warps=1, num_stages=1)
        del buf0
    return buf1, primals_1


class OuterProductLayerNew(nn.Module):
    """OuterProduct Layer used in PNN. This implementation is adapted from the
    code that the author of the paper published on https://github.com/Atomu2014/product-nets.
    """

    def __init__(self, num_feature_field, embedding_size, device):
        """
        Args:
            num_feature_field(int): number of feature fields.
            embedding_size(int): size of each embedding vector.
            device(torch.device): device object of the model.
        """
        super(OuterProductLayerNew, self).__init__()
        self.num_feature_field = num_feature_field
        num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
        embed_size = embedding_size
        self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
            embed_size), requires_grad=True)
        nn.init.xavier_uniform_(self.kernel)

    def forward(self, input_0):
        primals_2 = self.kernel
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
BELIEVEfxy/LightSANs
OuterProductLayer
false
7,773
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
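A sketch for the original OuterProductLayer above, assuming the class is in scope; values follow get_init_inputs()/get_inputs(), except that device is passed as a torch.device rather than the record's placeholder 0. With 4 fields there are 4*3/2 = 6 pairs:

import torch

layer = OuterProductLayer(num_feature_field=4, embedding_size=4,
    device=torch.device('cpu'))
emb = torch.rand(4, 4, 4)  # (batch_size, num_feature_field, embedding_size)
out = layer(emb)  # (4, 6): one kernel-weighted inner product per field pair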
Sign
from torch.autograd import Function import torch import torch.nn as nn class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self, x): return SignFunction.apply(x, self.training) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp1 < tmp0 tmp3 = tmp2.to(tl.int8) tmp4 = tmp0 < tmp1 tmp5 = tmp4.to(tl.int8) tmp6 = tmp3 - tmp5 tmp7 = tmp6.to(tmp0.dtype) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sign_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class SignNew(nn.Module): def __init__(self): super(SignNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Biaze7/lossy-image-compression
Sign
false
7,774
[ "MIT" ]
16
88ca2022a306fea52d6671593b314f0de3bf6010
https://github.com/Biaze7/lossy-image-compression/tree/88ca2022a306fea52d6671593b314f0de3bf6010
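A sketch of the Sign module above, assuming the class is in scope; in train() mode SignFunction samples +1 with probability (1 + x) / 2 (the complement of the (1 - x) / 2 threshold in forward), while eval() mode falls back to the deterministic x.sign():

import torch

s = Sign()
x = torch.rand(4, 4, 4, 4) * 2 - 1  # spread inputs over [-1, 1)
s.train()
stochastic_codes = s(x)  # random +/-1 codes, biased toward sign(x)
s.eval()
deterministic_codes = s(x)  # exactly x.sign()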
D_phiVpsi
import torch
import torch.utils.data
import torch.nn as nn


def add_layer(seq, ix, n_inputs, n_outputs, nonlin, normalization):
    seq.add_module('L' + str(ix), nn.Linear(n_inputs, n_outputs))
    # optional normalization, applied after every hidden (ix > 0) linear layer
    if ix > 0 and normalization:
        if normalization == 'LN':
            seq.add_module('A' + str(ix), nn.LayerNorm(n_outputs))
        else:
            raise ValueError('Unknown normalization: {}'.format(normalization))
    if nonlin == 'LeakyReLU':
        seq.add_module('N' + str(ix), nn.LeakyReLU(0.2, inplace=True))
    elif nonlin == 'ReLU':
        seq.add_module('N' + str(ix), nn.ReLU(inplace=True))
    elif nonlin == 'Sigmoid':
        seq.add_module('N' + str(ix), nn.Sigmoid())


class D_phiVpsi(nn.Module):

    def __init__(self, insizes=[1, 1], layerSizes=[[32, 32, 16]] * 2,
        nonlin='LeakyReLU', normalization=None):
        super(D_phiVpsi, self).__init__()
        self.phi_x, self.psi_y = nn.Sequential(), nn.Sequential()
        for seq, insize, layerSize in [(self.phi_x, insizes[0], layerSizes[
            0]), (self.psi_y, insizes[1], layerSizes[1])]:
            for ix, n_inputs, n_outputs in zip(range(len(layerSize)), [
                insize] + layerSize[:-1], layerSize):
                add_layer(seq, ix, n_inputs, n_outputs, nonlin, normalization)
        self.phiD, self.psiD = layerSizes[0][-1], layerSizes[1][-1]
        self.W = nn.Parameter(torch.randn(self.phiD, self.psiD))

    def forward(self, x, y):
        x = x.view(x.size(0), -1)
        y = y.view(x.size(0), 1)
        phi_x = self.phi_x(x)
        psi_y = self.psi_y(y)
        out = (torch.mm(phi_x, self.W) * psi_y).sum(1, keepdim=True)
        return out


def get_inputs():
    return [torch.rand([4, 1]), torch.rand([4, 1])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_per_fused_leaky_relu_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = 0.0 tmp3 = tmp1 > tmp2 tmp4 = 0.2 tmp5 = tmp1 * tmp4 tmp6 = tl.where(tmp3, tmp1, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 1), (1, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (32, 1), (1, 1)) assert_size_stride(primals_4, (32,), (1,)) assert_size_stride(primals_5, (32, 32), (32, 1)) assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (16, 32), (32, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (32, 1), (1, 1)) assert_size_stride(primals_10, (32,), (1,)) assert_size_stride(primals_11, (32, 32), (32, 1)) assert_size_stride(primals_12, (32,), (1,)) assert_size_stride(primals_13, (16, 32), (32, 1)) assert_size_stride(primals_14, (16,), (1,)) assert_size_stride(primals_15, (16, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (1, 32), (1, 1), 0), out=buf0) del primals_3 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(128)](buf1, primals_4, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 32), 
(32, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (32, 32), (1, 32), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_leaky_relu_0[grid(128)](buf3, primals_6, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_7, (32, 16), (1, 32), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_leaky_relu_1[grid(64)](buf5, primals_8, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_8 buf6 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_9, (1, 32), (1, 1), 0), out=buf6) del primals_9 buf7 = buf6 del buf6 triton_poi_fused_leaky_relu_0[grid(128)](buf7, primals_10, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_10 buf8 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_11, (32, 32), (1, 32), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_leaky_relu_0[grid(128)](buf9, primals_12, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 buf10 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_14, buf9, reinterpret_tensor( primals_13, (32, 16), (1, 32), 0), alpha=1, beta=1, out=buf10) del primals_14 buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(buf5, primals_15, out=buf11) buf12 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_per_fused_leaky_relu_mul_sum_2[grid(4)](buf11, buf10, buf12, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) return (buf12, primals_2, primals_1, buf1, buf3, buf5, buf7, buf9, buf10, buf11, reinterpret_tensor(primals_15, (16, 16), (1, 16), 0), primals_13, primals_11, primals_7, primals_5) def add_layer(seq, ix, n_inputs, n_outputs, nonlin, normalization): seq.add_module('L' + str(ix), nn.Linear(n_inputs, n_outputs)) if ix > 0 and normalization: if normalization == 'LN': seq.main.add_module('A' + str(ix), nn.LayerNorm(n_outputs)) else: raise ValueError('Unknown normalization: {}'.format(normalization)) if nonlin == 'LeakyReLU': seq.add_module('N' + str(ix), nn.LeakyReLU(0.2, inplace=True)) elif nonlin == 'ReLU': seq.add_module('N' + str(ix), nn.ReLU(inplace=True)) elif nonlin == 'Sigmoid': seq.add_module('N' + str(ix), nn.Sigmoid()) class D_phiVpsiNew(nn.Module): def __init__(self, insizes=[1, 1], layerSizes=[[32, 32, 16]] * 2, nonlin='LeakyReLU', normalization=None): super(D_phiVpsiNew, self).__init__() self.phi_x, self.psi_y = nn.Sequential(), nn.Sequential() for seq, insize, layerSize in [(self.phi_x, insizes[0], layerSizes[ 0]), (self.psi_y, insizes[1], layerSizes[1])]: for ix, n_inputs, n_outputs in zip(range(len(layerSize)), [ insize] + layerSize[:-1], layerSize): add_layer(seq, ix, n_inputs, n_outputs, nonlin, normalization) self.phiD, self.psiD = layerSizes[0][-1], layerSizes[1][-1] self.W = nn.Parameter(torch.randn(self.phiD, self.psiD)) def forward(self, input_0, input_1): primals_15 = self.W primals_3 = self.phi_x.L0.weight primals_4 = self.phi_x.L0.bias primals_5 = self.phi_x.L1.weight primals_6 = self.phi_x.L1.bias primals_7 = self.phi_x.L2.weight primals_8 = self.phi_x.L2.bias primals_9 = self.psi_y.L0.weight primals_10 = self.psi_y.L0.bias primals_11 = self.psi_y.L1.weight primals_12 = self.psi_y.L1.bias primals_13 = self.psi_y.L2.weight primals_14 = self.psi_y.L2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, 
primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
Bhaskers-Blu-Org1/SIC
D_phiVpsi
false
7,775
[ "Apache-2.0" ]
12
c4e45d7736da6e6faabdc56bfc1336445df99204
https://github.com/Bhaskers-Blu-Org1/SIC/tree/c4e45d7736da6e6faabdc56bfc1336445df99204
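A sketch for the original D_phiVpsi critic above, assuming the class is in scope; the two (4, 1) inputs match get_inputs(), and each row is scored through the phi/psi branches and the bilinear parameter W:

import torch

D = D_phiVpsi()  # defaults: insizes=[1, 1], three-layer [32, 32, 16] branches
score = D(torch.rand(4, 1), torch.rand(4, 1))  # shape (4, 1)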
Vgg16
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Vgg16(nn.Module): def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, X): h = F.relu(self.conv1_1(X), inplace=True) h = F.relu(self.conv1_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv2_1(h), inplace=True) h = F.relu(self.conv2_2(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv3_1(h), inplace=True) h = F.relu(self.conv3_2(h), inplace=True) h = F.relu(self.conv3_3(h), inplace=True) h = F.max_pool2d(h, kernel_size=2, stride=2) h = F.relu(self.conv4_1(h), inplace=True) h = F.relu(self.conv4_2(h), inplace=True) h = F.relu(self.conv4_3(h), inplace=True) h = F.relu(self.conv5_1(h), inplace=True) h = F.relu(self.conv5_2(h), inplace=True) h = F.relu(self.conv5_3(h), inplace=True) relu5_3 = h return relu5_3 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_14(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 
3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_9[grid(1048576)](buf15, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_9[grid(1048576)](buf17, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(262144)](buf17, buf18, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf3, 
stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_11[grid(524288)](buf21, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_11[grid(524288)](buf23, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(131072)](buf23, buf24, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf26 = extern_kernels.convolution(buf24, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_13[grid(262144)](buf27, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf28 = extern_kernels.convolution(buf27, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_13[grid(262144)](buf29, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_13[grid(262144)](buf31, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf32 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_14[grid(65536)](buf31, buf32, buf33, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf32, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_15[grid(131072)](buf35, primals_17, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf36 = extern_kernels.convolution(buf35, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_15[grid(131072)](buf37, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf38 = extern_kernels.convolution(buf37, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_15[grid(131072)](buf39, primals_21, 131072, XBLOCK=512, num_warps=8, num_stages=1) 
del primals_21 buf40 = extern_kernels.convolution(buf39, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_15[grid(131072)](buf41, primals_23, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf42 = extern_kernels.convolution(buf41, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_15[grid(131072)](buf43, primals_25, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf44 = extern_kernels.convolution(buf43, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf46 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(2048, 64) ](buf44, primals_27, buf45, buf46, 2048, 64, XBLOCK=32, YBLOCK= 32, num_warps=4, num_stages=1) del buf44 del primals_27 return (buf45, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf24, buf25, buf27, buf29, buf31, buf32, buf33, buf35, buf37, buf39, buf41, buf43, buf46) class Vgg16New(nn.Module): def __init__(self): super(Vgg16New, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, 
primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
AllenPu/mbdg
Vgg16
false
7,776
[ "MIT" ]
27
243f53a57dcf4bfb6e717c0c9f64a839cff8d548
https://github.com/AllenPu/mbdg/tree/243f53a57dcf4bfb6e717c0c9f64a839cff8d548
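A sketch for the Vgg16 feature extractor above, assuming the class is in scope; note the record only defines the architecture, so the weights here are randomly initialised rather than loaded from a pretrained checkpoint:

import torch

net = Vgg16().eval()
with torch.no_grad():
    relu5_3 = net(torch.rand(4, 3, 64, 64))  # (4, 512, 8, 8) after 3 poolings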
BaseFactorizationMachine
import torch
import torch.nn as nn


class BaseFactorizationMachine(nn.Module):
    """Calculate the FM second-order interaction term over the input embeddings.

    Args:
        reduce_sum: bool, whether to sum the result over the embedding dimension, default is True.

    Input:
        input_x: tensor, a 3D tensor with shape ``(batch_size, field_size, embed_dim)``.

    Output:
        output: tensor, a 2D tensor with shape ``(batch_size, 1)`` if ``reduce_sum`` is True,
        otherwise ``(batch_size, embed_dim)``.
    """

    def __init__(self, reduce_sum=True):
        super(BaseFactorizationMachine, self).__init__()
        self.reduce_sum = reduce_sum

    def forward(self, input_x):
        square_of_sum = torch.sum(input_x, dim=1) ** 2
        sum_of_square = torch.sum(input_x ** 2, dim=1)
        output = square_of_sum - sum_of_square
        if self.reduce_sum:
            output = torch.sum(output, dim=1, keepdim=True)
        output = 0.5 * output
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + x2, tmp68, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class BaseFactorizationMachineNew(nn.Module): """Calculate FM result over the embeddings Args: reduce_sum: bool, whether to sum the result, default is True. Input: input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``. Output output: tensor, A 3D tensor with shape: ``(batch_size,1)`` or ``(batch_size, embed_dim)``. 
""" def __init__(self, reduce_sum=True): super(BaseFactorizationMachineNew, self).__init__() self.reduce_sum = reduce_sum def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BELIEVEfxy/LightSANs
BaseFactorizationMachine
false
7,777
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e
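A sketch of the FM interaction above, assuming the class is in scope; the docstring describes a 3D (batch, field, embed) input, though the dim=1 reduction also accepts the 4D shape used by get_inputs():

import torch

fm = BaseFactorizationMachine(reduce_sum=True)
out_3d = fm(torch.rand(4, 4, 4))     # (4, 1), the usual FM second-order term
out_4d = fm(torch.rand(4, 4, 4, 4))  # (4, 1, 4), matching the record's test shape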
AdjEncoder
import torch from torch import nn import torch.utils.data class AdjEncoder(nn.Module): def __init__(self, featureSize, hiddenSize): super(AdjEncoder, self).__init__() self.left = nn.Linear(featureSize, hiddenSize) self.right = nn.Linear(featureSize, hiddenSize, bias=False) self.second = nn.Linear(hiddenSize, hiddenSize) self.third = nn.Linear(hiddenSize, featureSize) self.tanh = nn.Tanh() def forward(self, left_in, right_in): out = self.left(left_in) out += self.right(right_in) out = self.tanh(out) out = self.second(out) out = self.tanh(out) out = self.third(out) out = self.tanh(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'featureSize': 4, 'hiddenSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf2, primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = buf1 del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_tanh_1[grid(256)](buf4, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_tanh_1[grid(256)](buf6, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0 ), buf2, buf4, buf6, primals_8, primals_6 
class AdjEncoderNew(nn.Module): def __init__(self, featureSize, hiddenSize): super(AdjEncoderNew, self).__init__() self.left = nn.Linear(featureSize, hiddenSize) self.right = nn.Linear(featureSize, hiddenSize, bias=False) self.second = nn.Linear(hiddenSize, hiddenSize) self.third = nn.Linear(hiddenSize, featureSize) self.tanh = nn.Tanh() def forward(self, input_0, input_1): primals_1 = self.left.weight primals_2 = self.left.bias primals_4 = self.right.weight primals_6 = self.second.weight primals_7 = self.second.bias primals_8 = self.third.weight primals_9 = self.third.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
BigkoalaZhu/SCORES
AdjEncoder
false
7,778
[ "MIT" ]
16
8332733c375ee85c02bd34c2adce6a3213aad3c4
https://github.com/BigkoalaZhu/SCORES/tree/8332733c375ee85c02bd34c2adce6a3213aad3c4
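A sketch for the AdjEncoder above, assuming the class is in scope; the linear layers act on the last dimension, so the (4, 4, 4, 4) inputs from get_inputs() come back tanh-bounded with the same shape:

import torch

enc = AdjEncoder(featureSize=4, hiddenSize=4)
fused = enc(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))  # (4, 4, 4, 4), values in (-1, 1)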
Binarizer
from torch.autograd import Function
import torch
import torch.nn as nn


class SignFunction(Function):

    def __init__(self):
        super(SignFunction, self).__init__()

    @staticmethod
    def forward(ctx, input, is_training=True):
        if is_training:
            prob = input.new(input.size()).uniform_()
            x = input.clone()
            x[(1 - input) / 2 <= prob] = 1
            x[(1 - input) / 2 > prob] = -1
            return x
        else:
            return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None


class Sign(nn.Module):

    def __init__(self):
        super(Sign, self).__init__()

    def forward(self, x):
        return SignFunction.apply(x, self.training)


class Binarizer(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(Binarizer, self).__init__()
        self.sign = Sign()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
            bias=False)

    def forward(self, x):
        x = self.conv1(x)
        x = torch.tanh(x)
        return self.sign(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sign_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tl.full([1], 0, tl.int32) tmp3 = tmp2 < tmp1 tmp4 = tmp3.to(tl.int8) tmp5 = tmp1 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp4 - tmp6 tmp8 = tmp7.to(tmp1.dtype) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sign_tanh_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf0 class SignFunction(Function): def __init__(self): super(SignFunction, self).__init__() @staticmethod def forward(ctx, input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self, x): return SignFunction.apply(x, self.training) class BinarizerNew(nn.Module): def __init__(self, in_channels, out_channels): super(BinarizerNew, self).__init__() self.sign = Sign() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Biaze7/lossy-image-compression
Binarizer
false
7,779
[ "MIT" ]
16
88ca2022a306fea52d6671593b314f0de3bf6010
https://github.com/Biaze7/lossy-image-compression/tree/88ca2022a306fea52d6671593b314f0de3bf6010
Perceptron
import torch
import torch.nn as nn
import torch.nn.functional as F


class Perceptron(nn.Module):
    """Implements a perceptron with a single hidden layer."""

    def __init__(self, input_dimension, hidden_dimension, output_dimension):
        super(Perceptron, self).__init__()
        self._layer1 = nn.Linear(input_dimension, hidden_dimension)
        self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False
            )

    def forward(self, inp):
        # torch.sigmoid replaces the deprecated F.sigmoid; behavior is identical.
        return torch.sigmoid(self._layer2(F.relu(self._layer1(inp))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dimension': 4, 'hidden_dimension': 4,
        'output_dimension': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sigmoid_1[grid(256)](buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf4 class PerceptronNew(nn.Module): """Implements a 1-layer perceptron.""" def __init__(self, input_dimension, hidden_dimension, output_dimension): super(PerceptronNew, self).__init__() self._layer1 = nn.Linear(input_dimension, hidden_dimension) self._layer2 = nn.Linear(hidden_dimension, output_dimension, bias=False ) def forward(self, input_0): primals_1 = self._layer1.weight primals_2 = self._layer1.bias primals_4 = self._layer2.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Bhaskers-Blu-Org2/PDP-Solver
Perceptron
false
7,780
[ "MIT" ]
28
1fca34d81f36268288f46416fb6956e5b36df69e
https://github.com/Bhaskers-Blu-Org2/PDP-Solver/tree/1fca34d81f36268288f46416fb6956e5b36df69e
BoxEncoder
import torch from torch import nn import torch.utils.data class BoxEncoder(nn.Module): def __init__(self, boxSize, featureSize, hiddenSize): super(BoxEncoder, self).__init__() self.encoder = nn.Linear(boxSize, featureSize) self.middlein = nn.Linear(featureSize, hiddenSize) self.middleout = nn.Linear(hiddenSize, featureSize) self.tanh = nn.Tanh() def forward(self, boxes_in): boxes = self.encoder(boxes_in) boxes = self.tanh(boxes) boxes = self.middlein(boxes) boxes = self.tanh(boxes) boxes = self.middleout(boxes) boxes = self.tanh(boxes) return boxes def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'boxSize': 4, 'featureSize': 4, 'hiddenSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_0[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_6, primals_4 class BoxEncoderNew(nn.Module): def __init__(self, boxSize, featureSize, hiddenSize): super(BoxEncoderNew, self).__init__() self.encoder = nn.Linear(boxSize, featureSize) self.middlein = nn.Linear(featureSize, hiddenSize) self.middleout = nn.Linear(hiddenSize, featureSize) self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.encoder.weight primals_2 = self.encoder.bias primals_4 = self.middlein.weight primals_5 = self.middlein.bias primals_6 = self.middleout.weight primals_7 = self.middleout.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
BigkoalaZhu/SCORES
BoxEncoder
false
7,781
[ "MIT" ]
16
8332733c375ee85c02bd34c2adce6a3213aad3c4
https://github.com/BigkoalaZhu/SCORES/tree/8332733c375ee85c02bd34c2adce6a3213aad3c4
kAttentionPooling
import torch
import torch.nn as nn


class kAttentionPooling(nn.Module):

    def __init__(self, seq_len, hidden_size, k_heads=5):
        super().__init__()
        # seq_len is unused here; it is kept in the signature for
        # interface compatibility.
        self.k_heads = k_heads
        self.theta_k = nn.Parameter(torch.randn([hidden_size, k_heads]))

    def forward(self, input_tensor):
        # (n, seq_len, hidden_size) @ (hidden_size, k_heads)
        # -> (n, seq_len, k_heads)
        attention_matrix = torch.matmul(input_tensor, self.theta_k)
        # Normalize over the sequence dimension (dim=-2) so each head's
        # attention weights sum to 1 across positions.
        attention_matrix = torch.softmax(attention_matrix, dim=-2)
        # Pool the sequence into k_heads weighted sums of the inputs:
        # output shape is (n, k_heads, hidden_size).
        pooling_result = torch.einsum('nij, nik -> nkj', input_tensor,
            attention_matrix)
        return pooling_result


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'seq_len': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 5 x2 = xindex // 20 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 5 x2 = xindex // 20 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 5), (5, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(80)](buf0, buf1, 80, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused__softmax_1[grid(80)](buf1, buf2, 80, XBLOCK=128, num_warps=4, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf2, out=buf3) del buf2 return reinterpret_tensor(buf3, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0 class kAttentionPoolingNew(nn.Module): def __init__(self, seq_len, hidden_size, k_heads=5): super().__init__() self.k_heads = k_heads self.theta_k = nn.Parameter(torch.randn([hidden_size, k_heads])) def forward(self, input_0): primals_1 = self.theta_k primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
BELIEVEfxy/LightSANs
kAttentionPooling
false
7,782
[ "MIT" ]
17
94ce7e59d144dbc787153b8c486cad334790ec6e
https://github.com/BELIEVEfxy/LightSANs/tree/94ce7e59d144dbc787153b8c486cad334790ec6e