Dataset columns:
- entry_point: string (length 1–65)
- original_triton_python_code: string (length 208–619k)
- optimised_triton_code: string (length 1.15k–275k)
- repo_name: string (length 7–115)
- module_name: string (length 1–65)
- synthetic: bool (1 class)
- uuid: int64 (0–18.5k)
- licenses: list (length 1–6)
- stars: int64 (0–19.8k)
- sha: string (length 40)
- repo_link: string (length 72–180)
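The rows below follow this schema. As a minimal sketch of reading one row and its fields, assuming the data is published as a Hugging Face dataset (the dataset path below is a placeholder, not a real identifier):

# Sketch only: "example/dataset-name" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("example/dataset-name", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
print(row["original_triton_python_code"][:200])  # original nn.Module source
print(row["optimised_triton_code"][:200])        # Inductor-generated Triton source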
entry_point: CELoss

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch._utils
import torch.nn


class CELoss(nn.Module):
    """
    Distilling the Knowledge in a Neural Network, NIPS2014.
    https://arxiv.org/pdf/1503.02531.pdf
    """

    def __init__(self, T=1, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.t = T

    def forward(self, s_preds, t_preds, **kwargs):
        loss = 0
        for s_pred, t_pred in zip(s_preds, t_preds):
            s = F.log_softmax(s_pred / self.t, dim=1)
            t = F.softmax(t_pred / self.t, dim=1)
            loss += torch.mean(torch.sum(-t * s, 1))
        return loss * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim import torch._utils import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_neg_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + x3, xmask) tmp11 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = -tmp8 tmp12 = tl_math.exp(tmp11) tmp14 = 
tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = tmp9 * tmp23 tl.store(out_ptr0 + x3, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (128 + x3), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (128 + x3), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (192 + x3), xmask) tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (192 + x3), xmask) tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = 
tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (64 + x3), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (64 + x3), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused_add_mean_mul_sum_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None) tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None) tmp3 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None) tmp5 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None) tmp10 = tl.load(in_ptr1 + (r0 + 16 * r1), None) tmp11 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), None) tmp13 = tl.load(in_ptr1 + (8 + r0 + 16 * r1), None) tmp15 = tl.load(in_ptr1 + (12 + r0 + 16 * r1), None) tmp20 = tl.load(in_ptr2 + (r0 + 16 * r1), None) tmp21 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None) tmp23 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None) tmp25 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None) tmp30 = tl.load(in_ptr3 + (r0 + 16 * r1), None) tmp31 = 
tl.load(in_ptr3 + (4 + r0 + 16 * r1), None) tmp33 = tl.load(in_ptr3 + (8 + r0 + 16 * r1), None) tmp35 = tl.load(in_ptr3 + (12 + r0 + 16 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp26 = tmp24 + tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp36 = tmp34 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 16.0 tmp41 = tmp9 / tmp40 tmp42 = 0.0 tmp43 = tmp41 + tmp42 tmp44 = tmp19 / tmp40 tmp45 = tmp43 + tmp44 tmp46 = tmp29 / tmp40 tmp47 = tmp45 + tmp46 tmp48 = tmp39 / tmp40 tmp49 = tmp47 + tmp48 tmp50 = 1.0 tmp51 = tmp49 * tmp50 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_1[grid(64)](arg0_1, buf1, 64, XBLOCK=64, num_warps =1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf1 del buf1 triton_poi_fused__softmax_3[grid(64)](arg1_1, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf0 del buf0 triton_poi_fused_4[grid(64)](arg0_1, buf9, 64, XBLOCK=64, num_warps =1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = buf9 del buf9 triton_poi_fused__softmax_5[grid(64)](arg1_1, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = buf8 del buf8 triton_poi_fused_6[grid(64)](arg0_1, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf12, buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf13 del buf13 triton_poi_fused__softmax_7[grid(64)](arg1_1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf5 = buf12 del buf12 triton_poi_fused_8[grid(64)](arg0_1, buf5, 64, XBLOCK=64, num_warps =1, num_stages=1) del arg0_1 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax__softmax_mul_neg_2[grid(64)](buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 del buf5 buf11 = empty_strided_cuda((), (), torch.float32) buf16 = buf11 del buf11 triton_per_fused_add_mean_mul_sum_9[grid(1)](buf16, buf2, buf6, buf10, buf14, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf14 del buf2 del buf6 return buf16, class CELossNew(nn.Module): """ Distilling the Knowledge in a Neural Network, NIPS2014. 
https://arxiv.org/pdf/1503.02531.pdf """ def __init__(self, T=1, loss_weight=1.0): super().__init__() self.loss_weight = loss_weight self.t = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: ModelTC/EOD
module_name: CELoss
synthetic: false
uuid: 14,073
licenses: ["Apache-2.0"]
stars: 196
sha: 164bff80486e9ae6a095a97667b365c46ceabd86
repo_link: https://github.com/ModelTC/EOD/tree/164bff80486e9ae6a095a97667b365c46ceabd86
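Each row pairs a reference module (here CELoss) with an Inductor-generated equivalent (CELossNew) plus the get_inputs/get_init_inputs helpers, so the two can be compared directly. A minimal sketch of that check, assuming the two code cells above were saved to importable files (the file names are hypothetical) and a CUDA device is available:

# Sketch only: assumes the cells were saved as celoss_ref.py (CELoss, get_inputs)
# and celoss_triton.py (CELossNew); both require a CUDA-enabled PyTorch build.
import torch
from celoss_ref import CELoss, get_inputs
from celoss_triton import CELossNew

inputs = [x.cuda() for x in get_inputs()]
ref = CELoss()(*inputs)     # eager PyTorch result
opt = CELossNew()(*inputs)  # result from the generated Triton kernels
print(torch.allclose(ref, opt, atol=1e-5))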
entry_point: BiaffineScorer

original_triton_python_code:

import torch
import torch.nn as nn


class BiaffineScorer(nn.Module):

    def __init__(self, input1_size, input2_size, output_size):
        super().__init__()
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
            output_size)
        self.W_bilin.weight.data.zero_()
        self.W_bilin.bias.data.zero_()

    def forward(self, input1, input2):
        input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
            ], len(input1.size()) - 1)
        input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
            ], len(input2.size()) - 1)
        return self.W_bilin(input1, input2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, ( 64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5), (5, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf3 = buf2 del buf2 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0 ), reinterpret_tensor(buf1, (64, 5), (5, 1), 0) class BiaffineScorerNew(nn.Module): def __init__(self, input1_size, input2_size, output_size): super().__init__() self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size) self.W_bilin.weight.data.zero_() self.W_bilin.bias.data.zero_() def forward(self, input_0, input_1): primals_3 = self.W_bilin.weight primals_4 = self.W_bilin.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: NLPInBLCU/BiaffineDependencyParsing
module_name: BiaffineScorer
synthetic: false
uuid: 14,074
licenses: ["MIT"]
stars: 67
sha: 40b133648c747957dacd59916add0403371fe680
repo_link: https://github.com/NLPInBLCU/BiaffineDependencyParsing/tree/40b133648c747957dacd59916add0403371fe680
entry_point: DeepHeadModule

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt


class DeepHeadModule(nn.Module):

    def __init__(self, input_channels, output_channels):
        super(DeepHeadModule, self).__init__()
        self._input_channels = input_channels
        self._output_channels = output_channels
        self._mid_channels = min(self._input_channels, 256)
        self.conv1 = nn.Conv2d(self._input_channels, self._mid_channels,
            kernel_size=3, dilation=1, stride=1, padding=1)
        self.conv2 = nn.Conv2d(self._mid_channels, self._mid_channels,
            kernel_size=3, dilation=1, stride=1, padding=1)
        self.conv3 = nn.Conv2d(self._mid_channels, self._mid_channels,
            kernel_size=3, dilation=1, stride=1, padding=1)
        self.conv4 = nn.Conv2d(self._mid_channels, self._output_channels,
            kernel_size=1, dilation=1, stride=1, padding=0)

    def forward(self, x):
        return self.conv4(F.relu(self.conv3(F.relu(self.conv2(F.relu(
            self.conv1(x), inplace=True)), inplace=True)), inplace=True))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'output_channels': 4}]

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_1[grid(256)](buf7, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5) class DeepHeadModuleNew(nn.Module): 
def __init__(self, input_channels, output_channels): super(DeepHeadModuleNew, self).__init__() self._input_channels = input_channels self._output_channels = output_channels self._mid_channels = min(self._input_channels, 256) self.conv1 = nn.Conv2d(self._input_channels, self._mid_channels, kernel_size=3, dilation=1, stride=1, padding=1) self.conv2 = nn.Conv2d(self._mid_channels, self._mid_channels, kernel_size=3, dilation=1, stride=1, padding=1) self.conv3 = nn.Conv2d(self._mid_channels, self._mid_channels, kernel_size=3, dilation=1, stride=1, padding=1) self.conv4 = nn.Conv2d(self._mid_channels, self._output_channels, kernel_size=1, dilation=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
repo_name: NTech-Lab/deepfake-detection-challenge
module_name: DeepHeadModule
synthetic: false
uuid: 14,075
licenses: ["Apache-2.0"]
stars: 98
sha: 52095ce4a49f298faf075a5eb28391722b9e4103
repo_link: https://github.com/NTech-Lab/deepfake-detection-challenge/tree/52095ce4a49f298faf075a5eb28391722b9e4103
entry_point: ConvGelu

original_triton_python_code:

import torch
import torch.nn as nn
import torch.fx


class ConvGelu(torch.nn.Module):

    def __init__(self):
        super(ConvGelu, self).__init__()
        self.conv = nn.Conv2d(3, 32, 3, 1)
        self.gelu = nn.GELU()

    def forward(self, x):
        x = self.conv(x)
        x = self.gelu(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 492032 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 32, 62, 62), (123008, 3844, 62, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_gelu_0[grid(492032)](buf1, primals_2, buf2, 492032, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 return buf2, primals_1, primals_3, buf1 class ConvGeluNew(torch.nn.Module): def __init__(self): super(ConvGeluNew, self).__init__() self.conv = nn.Conv2d(3, 32, 3, 1) self.gelu = nn.GELU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: NVIDIA/Torch-TensorRT
module_name: ConvGelu
synthetic: false
uuid: 14,076
licenses: ["BSD-3-Clause"]
stars: 430
sha: 1a22204fecec690bc3c2a318dab4f57b98c57f05
repo_link: https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
entry_point: decoderVH

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class decoderVH(nn.Module):

    def __init__(self):
        super(decoderVH, self).__init__()
        self.dconv0 = nn.Conv2d(in_channels=256, out_channels=128,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn0 = nn.GroupNorm(8, 128)
        self.dconv1 = nn.Conv2d(in_channels=128, out_channels=64,
            kernel_size=3, stride=1, padding=1, bias=True)
        self.dgn1 = nn.GroupNorm(4, 64)
        self.dconv2 = nn.Conv2d(in_channels=64, out_channels=4,
            kernel_size=3, stride=1, padding=1, bias=True)

    def forward(self, x):
        x = F.relu(self.dgn0(self.dconv0(x)), True)
        x = F.relu(self.dgn1(self.dconv1(F.interpolate(x, scale_factor=2,
            mode='bilinear'))), True)
        x = torch.tanh(self.dconv2(F.interpolate(x, scale_factor=2,
            mode='bilinear')))
        mask, normal = torch.split(x, [1, 3], dim=1)
        mask = (mask + 1) * 0.5
        normal = normal / torch.clamp(torch.sqrt(torch.sum(normal * normal,
            dim=1)).unsqueeze(1), min=1e-06)
        return normal, mask


def get_inputs():
    return [torch.rand([4, 256, 64, 64])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 64 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = rindex // 4096 tmp0 = tl.load(in_out_ptr0 + (r5 + 8192 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 2 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + 8192 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 65536.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex 
x4 = xindex // 4096 x1 = xindex // 4096 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused__to_copy_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_4(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 63, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 128 % 128 x0 = xindex % 128 x2 = xindex // 16384 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * x2), None, 
eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * x2), None, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tl.store(in_out_ptr0 + x4, tmp31, None) @triton.jit def triton_red_fused_convolution_native_group_norm_7(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x1 = xindex // 2 % 64 tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_out_ptr0 + (r3 + 8192 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r3 + 8192 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused_native_group_norm_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 262144.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex // 16384 x1 = xindex // 16384 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 16, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 16, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp7 = 
tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 127, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 128, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 128 * tmp4 + 16384 * x2), None, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 128 * tmp4 + 16384 * x2), None, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 128 * tmp22 + 16384 * x2), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + 128 * tmp22 + 16384 * x2), None, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tl.store(in_out_ptr0 + x4, 
tmp31, None) @triton.jit def triton_poi_fused_convolution_tanh_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_add_mul_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 65536 x1 = xindex // 65536 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 262144 * x1), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_clamp_div_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 196608 x3 = xindex % 196608 x0 = xindex % 65536 x4 = xindex tmp0 = tl.load(in_ptr0 + (65536 + x3 + 262144 * x2), None) tmp1 = tl.load(in_ptr0 + (65536 + x0 + 262144 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (131072 + x0 + 262144 * x2), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (196608 + x0 + 262144 * x2), None, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 1e-06 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tmp0 / tmp11 tl.store(out_ptr0 + x4, tmp12, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_4, (128,), (1,)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (4, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 8, 1, 1, 8), (64, 8, 256, 256, 1), torch.float32) buf3 = empty_strided_cuda((4, 8, 1, 1, 8), (64, 8, 256, 256, 1), torch.float32) buf4 = empty_strided_cuda((4, 8, 1, 1, 8), (64, 8, 256, 256, 1), torch.float32) get_raw_stream(0) triton_red_fused_convolution_native_group_norm_0[grid(256)](buf1, primals_2, buf2, buf3, buf4, 256, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_2 buf5 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf8 = reinterpret_tensor(buf6, (4, 8, 1, 1), (8, 1, 1, 1), 0) del buf6 triton_per_fused_native_group_norm_1[grid(32)](buf8, buf2, buf3, buf4, buf5, 32, 8, XBLOCK=32, num_warps=2, num_stages=1) del buf2 buf9 = 
empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_native_group_norm_relu_2[grid(2097152)](buf1, buf5, buf8, primals_4, primals_5, buf9, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((128, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused__to_copy_3[grid(128)](buf12, 128, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((128,), (1,), torch.int64) triton_poi_fused_add_clamp_4[grid(128)](buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((128,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf14, 128, XBLOCK=128, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((128, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_5[grid(128)](buf16, 128, XBLOCK=128, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32) buf17 = buf15 del buf15 triton_poi_fused__unsafe_index_add_mul_sub_6[grid(8388608)](buf17, buf10, buf12, buf9, buf13, buf14, buf11, buf16, 8388608, XBLOCK =1024, num_warps=4, num_stages=1) del buf9 buf18 = extern_kernels.convolution(buf17, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 128, 128), (1048576, 16384, 128, 1)) buf19 = buf18 del buf18 buf20 = empty_strided_cuda((4, 4, 1, 1, 32), (128, 32, 512, 512, 1), torch.float32) buf21 = empty_strided_cuda((4, 4, 1, 1, 32), (128, 32, 512, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 4, 1, 1, 32), (128, 32, 512, 512, 1), torch.float32) triton_red_fused_convolution_native_group_norm_7[grid(512)](buf19, primals_7, buf20, buf21, buf22, 512, 8192, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) del primals_7 buf23 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf24 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf26 = reinterpret_tensor(buf24, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf24 triton_per_fused_native_group_norm_8[grid(16)](buf26, buf20, buf21, buf22, buf23, 16, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf20 del buf21 del buf22 buf27 = empty_strided_cuda((4, 64, 128, 128), (1048576, 16384, 128, 1), torch.float32) triton_poi_fused_native_group_norm_relu_9[grid(4194304)](buf19, buf23, buf26, primals_8, primals_9, buf27, 4194304, XBLOCK=512, num_warps=8, num_stages=1) buf28 = empty_strided_cuda((256, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_10[grid(256)](buf28, 256, XBLOCK=256, num_warps=4, num_stages=1) buf29 = empty_strided_cuda((256, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_11[grid(256)](buf29, 256, XBLOCK=256, num_warps=4, num_stages=1) buf30 = empty_strided_cuda((256,), (1,), torch.int64) triton_poi_fused__to_copy_10[grid(256)](buf30, 256, XBLOCK=256, num_warps=4, num_stages=1) buf31 = empty_strided_cuda((256,), (1,), torch.int64) triton_poi_fused_add_clamp_11[grid(256)](buf31, 256, XBLOCK=256, num_warps=4, num_stages=1) buf32 = reinterpret_tensor(buf4, (256,), (1,), 0) del buf4 triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(256)](buf32, 256, XBLOCK=256, num_warps=4, num_stages=1) buf34 = reinterpret_tensor(buf3, (256, 1), 
(1, 1), 0) del buf3 triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(256)](buf34, 256, XBLOCK=256, num_warps=4, num_stages=1) buf33 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32) buf35 = buf33 del buf33 triton_poi_fused__unsafe_index_add_mul_sub_13[grid(16777216)](buf35, buf28, buf30, buf27, buf31, buf32, buf29, buf34, 16777216, XBLOCK=1024, num_warps=4, num_stages=1) del buf27 buf36 = extern_kernels.convolution(buf35, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 256, 256), (262144, 65536, 256, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_tanh_14[grid(1048576)](buf37, primals_11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf38 = empty_strided_cuda((4, 1, 256, 256), (65536, 65536, 256, 1), torch.float32) triton_poi_fused_add_mul_15[grid(262144)](buf37, buf38, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf39 = empty_strided_cuda((4, 3, 256, 256), (196608, 65536, 256, 1 ), torch.float32) triton_poi_fused_clamp_div_16[grid(786432)](buf37, buf39, 786432, XBLOCK=512, num_warps=8, num_stages=1) return (buf39, buf38, primals_1, primals_3, primals_4, primals_5, primals_6, primals_8, primals_9, primals_10, buf1, buf5, buf8, buf10, buf11, buf12, buf13, buf14, buf16, buf17, buf19, buf23, buf26, buf28, buf29, buf30, buf31, buf32, buf34, buf35, buf37, reinterpret_tensor(buf37, (4, 3, 256, 256), (262144, 65536, 256, 1), 65536)) class decoderVHNew(nn.Module): def __init__(self): super(decoderVHNew, self).__init__() self.dconv0 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True) self.dgn0 = nn.GroupNorm(8, 128) self.dconv1 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True) self.dgn1 = nn.GroupNorm(4, 64) self.dconv2 = nn.Conv2d(in_channels=64, out_channels=4, kernel_size =3, stride=1, padding=1, bias=True) def forward(self, input_0): primals_1 = self.dconv0.weight primals_2 = self.dconv0.bias primals_4 = self.dgn0.weight primals_5 = self.dgn0.bias primals_6 = self.dconv1.weight primals_7 = self.dconv1.bias primals_8 = self.dgn1.weight primals_9 = self.dgn1.bias primals_10 = self.dconv2.weight primals_11 = self.dconv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
repo_name: Miles629/TransparentShapeRealData
module_name: decoderVH
synthetic: false
uuid: 14,077
licenses: ["MIT"]
stars: 91
sha: b81098a2d1882f5fd33fba6167d7258dbe02d6d2
repo_link: https://github.com/Miles629/TransparentShapeRealData/tree/b81098a2d1882f5fd33fba6167d7258dbe02d6d2
entry_point: Pool

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fx


class Pool(nn.Module):

    def __init__(self):
        super(Pool, self).__init__()

    def forward(self, x):
        return F.adaptive_avg_pool2d(x, (5, 5))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 5 % 5 x0 = xindex % 5 x2 = xindex // 25 x4 = xindex tmp0 = 4 * x1 // 5 tmp1 = (8 + 4 * x1) // 5 tmp2 = tmp0 < tmp1 tmp3 = 4 * x0 // 5 tmp4 = (8 + 4 * x0) // 5 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + 4 * x0 // 5 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + 4 * x1 // 5 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 5) + 16 * x2 + 4 * x0 // 5), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0[grid(400)](arg0_1, buf0, 400, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PoolNew(nn.Module): def __init__(self): super(PoolNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: NVIDIA/Torch-TensorRT
module_name: Pool
synthetic: false
uuid: 14,078
licenses: ["BSD-3-Clause"]
stars: 430
sha: 1a22204fecec690bc3c2a318dab4f57b98c57f05
repo_link: https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
entry_point: LoopFallbackNoEval

original_triton_python_code:

import torch
import torch.nn as nn
import torch.fx


class LoopFallbackNoEval(nn.Module):

    def __init__(self):
        super(LoopFallbackNoEval, self).__init__()

    def forward(self, x):
        for _ in range(x.shape[1]):
            x = x + torch.ones_like(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_ones_like_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp4 + tmp1 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_ones_like_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LoopFallbackNoEvalNew(nn.Module): def __init__(self): super(LoopFallbackNoEvalNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA/Torch-TensorRT
LoopFallbackNoEval
false
14,079
[ "BSD-3-Clause" ]
430
1a22204fecec690bc3c2a318dab4f57b98c57f05
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
KDLoss
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch._utils
import torch.nn


class KDLoss(nn.Module):
    """
    Distilling the Knowledge in a Neural Network, NIPS2014.
    https://arxiv.org/pdf/1503.02531.pdf
    """

    def __init__(self, T=1, loss_weight=1.0):
        super().__init__()
        self.loss_weight = loss_weight
        self.t = T

    def single_kl(self, s_preds, t_preds, mask=None):
        if mask is not None:
            if mask.sum() > 0:
                p = F.log_softmax(s_preds / self.t, dim=1)[mask]
                q = F.softmax(t_preds / self.t, dim=1)[mask]
                l_kl = F.kl_div(p, q, reduce=False)
                loss = torch.sum(l_kl)
                loss = loss / mask.sum()
            else:
                loss = torch.Tensor([0])
        else:
            p = F.log_softmax(s_preds / self.t, dim=1)
            q = F.softmax(t_preds / self.t, dim=1)
            l_kl = F.kl_div(p, q, reduce=False)
            loss = l_kl.sum() / l_kl.size(0)
        return loss * self.t ** 2

    def forward(self, s_preds, t_preds, masks=None):
        if masks is not None:
            assert isinstance(masks, list) and len(masks) == len(s_preds), 'masks must be consistent with preds!'
        else:
            masks = [None for _ in range(len(s_preds))]
        loss = 0
        for idx, (s, t) in enumerate(zip(s_preds, t_preds)):
            loss += self.single_kl(s, t, masks[idx])
        return loss * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.optim import torch._utils import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (128 + x3), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (192 + x3), xmask) tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset 
= tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (192 + x3), xmask) tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (64 + x3), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (64 + x3), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) 
tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (128 + x3), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_8( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp36 = tl.load(in_ptr2 + r3, None) tmp37 = tl.load(in_ptr2 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr2 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr2 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr2 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp51 = tl.load(in_ptr3 + r3, None) tmp52 = tl.load(in_ptr3 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr3 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp57 = tl.load(in_ptr3 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr3 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr4 + r3, None) tmp71 = tl.load(in_ptr4 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp72 = tl.load(in_ptr4 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp74 = tl.load(in_ptr4 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp76 = tl.load(in_ptr4 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp85 = tl.load(in_ptr5 + r3, None) tmp86 = tl.load(in_ptr5 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp88 = 
tl.load(in_ptr5 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp91 = tl.load(in_ptr5 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp94 = tl.load(in_ptr5 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp104 = tl.load(in_ptr6 + r3, None) tmp105 = tl.load(in_ptr6 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp106 = tl.load(in_ptr6 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp108 = tl.load(in_ptr6 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp110 = tl.load(in_ptr6 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp119 = tl.load(in_ptr7 + r3, None) tmp120 = tl.load(in_ptr7 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp122 = tl.load(in_ptr7 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp125 = tl.load(in_ptr7 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp128 = tl.load(in_ptr7 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp39 = tmp37 + tmp38 tmp41 = tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.sum(tmp67, 1)[:, None] tmp73 = tmp71 + tmp72 tmp75 = tmp73 + tmp74 tmp77 = tmp75 + tmp76 tmp78 = tmp70 / tmp77 tmp79 = libdevice.isnan(tmp78).to(tl.int1) tmp80 = tmp78 == tmp10 tmp81 = tl_math.log(tmp78) tmp82 = tmp78 * tmp81 tmp83 = tl.where(tmp80, tmp10, tmp82) tmp84 = tl.where(tmp79, tmp15, tmp83) tmp87 = tl_math.exp(tmp86) tmp89 = tl_math.exp(tmp88) tmp90 = tmp87 + tmp89 tmp92 = tl_math.exp(tmp91) tmp93 = tmp90 + tmp92 tmp95 = tl_math.exp(tmp94) tmp96 = tmp93 + tmp95 tmp97 = tl_math.log(tmp96) tmp98 = tmp85 - tmp97 tmp99 = tmp78 * tmp98 tmp100 = tmp84 - tmp99 tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK]) tmp103 = tl.sum(tmp101, 1)[:, None] tmp107 = tmp105 + tmp106 tmp109 = tmp107 + tmp108 tmp111 = tmp109 + tmp110 tmp112 = tmp104 / tmp111 tmp113 = libdevice.isnan(tmp112).to(tl.int1) tmp114 = tmp112 == tmp10 tmp115 = tl_math.log(tmp112) tmp116 = tmp112 * tmp115 tmp117 = tl.where(tmp114, tmp10, tmp116) tmp118 = tl.where(tmp113, tmp15, tmp117) tmp121 = tl_math.exp(tmp120) tmp123 = tl_math.exp(tmp122) tmp124 = tmp121 + tmp123 tmp126 = tl_math.exp(tmp125) tmp127 = tmp124 + tmp126 tmp129 = tl_math.exp(tmp128) tmp130 = tmp127 + tmp129 tmp131 = tl_math.log(tmp130) tmp132 = tmp119 - tmp131 tmp133 = tmp112 * tmp132 tmp134 = tmp118 - tmp133 tmp135 = tl.broadcast_to(tmp134, [XBLOCK, RBLOCK]) tmp137 = 
tl.sum(tmp135, 1)[:, None] tmp138 = 0.25 tmp139 = tmp35 * tmp138 tmp140 = 1.0 tmp141 = tmp139 * tmp140 tmp142 = tmp141 + tmp10 tmp143 = tmp69 * tmp138 tmp144 = tmp143 * tmp140 tmp145 = tmp142 + tmp144 tmp146 = tmp103 * tmp138 tmp147 = tmp146 * tmp140 tmp148 = tmp145 + tmp147 tmp149 = tmp137 * tmp138 tmp150 = tmp149 * tmp140 tmp151 = tmp148 + tmp150 tmp152 = tmp151 * tmp140 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp152, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_1[grid(64)](arg0_1, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](arg1_1, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_3[grid(64)](arg0_1, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_4[grid(64)](arg0_1, buf2, 64, XBLOCK=64, num_warps =1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_5[grid(64)](arg1_1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_6[grid(64)](arg0_1, buf6, 64, XBLOCK=64, num_warps =1, num_stages=1) del arg0_1 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_7[grid(64)](arg1_1, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf11 = empty_strided_cuda((), (), torch.float32) buf16 = buf11 del buf11 triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_8[grid (1)](buf16, buf0, buf2, buf4, buf6, buf8, buf10, buf12, buf14, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf10 del buf12 del buf14 del buf2 del buf4 del buf6 del buf8 return buf16, class KDLossNew(nn.Module): """ Distilling the Knowledge in a Neural Network, NIPS2014. https://arxiv.org/pdf/1503.02531.pdf """ def __init__(self, T=1, loss_weight=1.0): super().__init__() self.loss_weight = loss_weight self.t = T def single_kl(self, s_preds, t_preds, mask=None): if mask is not None: if mask.sum() > 0: p = F.log_softmax(s_preds / self.t, dim=1)[mask] q = F.softmax(t_preds / self.t, dim=1)[mask] l_kl = F.kl_div(p, q, reduce=False) loss = torch.sum(l_kl) loss = loss / mask.sum() else: loss = torch.Tensor([0]) else: p = F.log_softmax(s_preds / self.t, dim=1) q = F.softmax(t_preds / self.t, dim=1) l_kl = F.kl_div(p, q, reduce=False) loss = l_kl.sum() / l_kl.size(0) return loss * self.t ** 2 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ModelTC/EOD
KDLoss
false
14,080
[ "Apache-2.0" ]
196
164bff80486e9ae6a095a97667b365c46ceabd86
https://github.com/ModelTC/EOD/tree/164bff80486e9ae6a095a97667b365c46ceabd86
SeqExpandConv
import torch
import torch.nn as nn
from math import sqrt as sqrt


class SeqExpandConv(nn.Module):

    def __init__(self, in_channels, out_channels, seq_length):
        super(SeqExpandConv, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)
        self.seq_length = seq_length

    def forward(self, x):
        batch_size, in_channels, height, width = x.shape
        x = x.view(batch_size // self.seq_length, self.seq_length, in_channels, height, width)
        x = self.conv(x.transpose(1, 2).contiguous()).transpose(2, 1).contiguous()
        x = x.flatten(0, 1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'seq_length': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1, 1), (12, 3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1, 1), padding=(1, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(256)](buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, buf0 class SeqExpandConvNew(nn.Module): def __init__(self, in_channels, out_channels, seq_length): super(SeqExpandConvNew, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False) self.seq_length = seq_length def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
NTech-Lab/deepfake-detection-challenge
SeqExpandConv
false
14,081
[ "Apache-2.0" ]
98
52095ce4a49f298faf075a5eb28391722b9e4103
https://github.com/NTech-Lab/deepfake-detection-challenge/tree/52095ce4a49f298faf075a5eb28391722b9e4103
KLLoss
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch._utils
import torch.nn


class KLLoss(nn.Module):
    """
    KL Divergence loss
    """

    def __init__(self, norm='softmax', loss_weight=1.0):
        super(KLLoss, self).__init__()
        self.loss_weight = loss_weight
        self.norm = norm

    def forward(self, s_features, t_features, **kwargs):
        loss = 0
        for s, t in zip(s_features, t_features):
            loss += self.kl(s, t)
        return loss * self.loss_weight

    def kl(self, pred_feas, target_feas):
        crit = nn.KLDivLoss(reduction='batchmean')
        relu = nn.ReLU()
        s = relu(pred_feas)
        t = relu(target_feas)
        if self.norm == 'softmax':
            s = F.log_softmax(s, dim=1)
            t = F.softmax(t, dim=1)
            t.detach_()
            loss = crit(s, t)
        elif self.norm == 'l2':
            loss = torch.sum(t / torch.sum(t) * torch.log((t / torch.sum(t) + 1e-06) / (s / torch.sum(s) + 1e-06)))
        else:
            None
            return None
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.optim import torch._utils import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (192 + x0 + 16 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (192 + x0 + 16 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (128 + x0 + 16 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr1 + (128 + x0 + 16 * (-12 + x1)), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + 16 * (-16 + x1)), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr1 + (64 + x0 + 16 * (-20 + x1)), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x2, tmp34, xmask) @triton.jit def triton_poi_fused__softmax_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (128 + x3), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (132 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (136 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (140 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (64 + x3), xmask) tmp3 = tl.load(in_ptr0 + (64 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (68 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (72 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (76 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__log_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (320 + x3), xmask) tmp3 = tl.load(in_ptr0 + (320 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (324 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (328 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (332 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (256 + x3), xmask) tmp3 = tl.load(in_ptr0 + (256 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (260 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (264 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (268 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + (192 + x3), xmask) tmp3 = tl.load(in_ptr0 + (192 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (196 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (200 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (204 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = triton_helpers.maximum(tmp1, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_8( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp36 = tl.load(in_ptr2 + r3, None) tmp37 = tl.load(in_ptr2 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr2 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr2 + (8 + r0 + 16 * r2), None, 
eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr2 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp51 = tl.load(in_ptr3 + r3, None) tmp52 = tl.load(in_ptr3 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr3 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp57 = tl.load(in_ptr3 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr3 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp70 = tl.load(in_ptr4 + r3, None) tmp71 = tl.load(in_ptr4 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp72 = tl.load(in_ptr4 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp74 = tl.load(in_ptr4 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp76 = tl.load(in_ptr4 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp85 = tl.load(in_ptr5 + r3, None) tmp86 = tl.load(in_ptr5 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp88 = tl.load(in_ptr5 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp91 = tl.load(in_ptr5 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp94 = tl.load(in_ptr5 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp104 = tl.load(in_ptr6 + r3, None) tmp105 = tl.load(in_ptr6 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp106 = tl.load(in_ptr6 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp108 = tl.load(in_ptr6 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp110 = tl.load(in_ptr6 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp119 = tl.load(in_ptr7 + r3, None) tmp120 = tl.load(in_ptr7 + (r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp122 = tl.load(in_ptr7 + (4 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp125 = tl.load(in_ptr7 + (8 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp128 = tl.load(in_ptr7 + (12 + r0 + 16 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp39 = tmp37 + tmp38 tmp41 = tmp39 + tmp40 tmp43 = tmp41 + tmp42 tmp44 = tmp36 / tmp43 tmp45 = libdevice.isnan(tmp44).to(tl.int1) tmp46 = tmp44 == tmp10 tmp47 = tl_math.log(tmp44) tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp10, tmp48) tmp50 = tl.where(tmp45, tmp15, tmp49) tmp53 = tl_math.exp(tmp52) tmp55 = tl_math.exp(tmp54) tmp56 = tmp53 + tmp55 tmp58 = tl_math.exp(tmp57) tmp59 = tmp56 + tmp58 tmp61 = tl_math.exp(tmp60) tmp62 = tmp59 + tmp61 tmp63 = tl_math.log(tmp62) tmp64 = tmp51 - tmp63 tmp65 = tmp44 * tmp64 tmp66 = tmp50 - tmp65 tmp67 = tl.broadcast_to(tmp66, [XBLOCK, RBLOCK]) tmp69 = tl.sum(tmp67, 1)[:, None] tmp73 = tmp71 + tmp72 tmp75 = tmp73 + tmp74 tmp77 = tmp75 + tmp76 tmp78 = tmp70 / tmp77 tmp79 = libdevice.isnan(tmp78).to(tl.int1) tmp80 = tmp78 == tmp10 tmp81 = tl_math.log(tmp78) tmp82 = tmp78 * tmp81 tmp83 = tl.where(tmp80, tmp10, tmp82) tmp84 = tl.where(tmp79, tmp15, tmp83) tmp87 = tl_math.exp(tmp86) tmp89 = tl_math.exp(tmp88) 
tmp90 = tmp87 + tmp89 tmp92 = tl_math.exp(tmp91) tmp93 = tmp90 + tmp92 tmp95 = tl_math.exp(tmp94) tmp96 = tmp93 + tmp95 tmp97 = tl_math.log(tmp96) tmp98 = tmp85 - tmp97 tmp99 = tmp78 * tmp98 tmp100 = tmp84 - tmp99 tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK]) tmp103 = tl.sum(tmp101, 1)[:, None] tmp107 = tmp105 + tmp106 tmp109 = tmp107 + tmp108 tmp111 = tmp109 + tmp110 tmp112 = tmp104 / tmp111 tmp113 = libdevice.isnan(tmp112).to(tl.int1) tmp114 = tmp112 == tmp10 tmp115 = tl_math.log(tmp112) tmp116 = tmp112 * tmp115 tmp117 = tl.where(tmp114, tmp10, tmp116) tmp118 = tl.where(tmp113, tmp15, tmp117) tmp121 = tl_math.exp(tmp120) tmp123 = tl_math.exp(tmp122) tmp124 = tmp121 + tmp123 tmp126 = tl_math.exp(tmp125) tmp127 = tmp124 + tmp126 tmp129 = tl_math.exp(tmp128) tmp130 = tmp127 + tmp129 tmp131 = tl_math.log(tmp130) tmp132 = tmp119 - tmp131 tmp133 = tmp112 * tmp132 tmp134 = tmp118 - tmp133 tmp135 = tl.broadcast_to(tmp134, [XBLOCK, RBLOCK]) tmp137 = tl.sum(tmp135, 1)[:, None] tmp138 = 0.25 tmp139 = tmp35 * tmp138 tmp140 = tmp139 + tmp10 tmp141 = tmp69 * tmp138 tmp142 = tmp140 + tmp141 tmp143 = tmp103 * tmp138 tmp144 = tmp142 + tmp143 tmp145 = tmp137 * tmp138 tmp146 = tmp144 + tmp145 tmp147 = 1.0 tmp148 = tmp146 * tmp147 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp148, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((24, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(384)](arg0_1, arg1_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_relu_1[grid(64)](arg1_1, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) del arg1_1 buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(64)](buf0, buf11, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf0, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_4[grid(64)](buf0, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_4[grid(64)](arg0_1, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_5[grid(64)](buf0, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(64)](buf0, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_7[grid(64)](buf0, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf12 = empty_strided_cuda((), (), torch.float32) buf17 = buf12 del buf12 triton_per_fused__log_softmax__softmax_add_div_mul_sub_sum_xlogy_8[grid (1)](buf17, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf11 del buf13 del buf15 del buf3 del buf5 del buf7 del buf9 return buf17, class KLLossNew(nn.Module): """ KL Divergence loss """ def __init__(self, norm='softmax', loss_weight=1.0): super(KLLossNew, self).__init__() self.loss_weight = 
loss_weight self.norm = norm def kl(self, pred_feas, target_feas): crit = nn.KLDivLoss(reduction='batchmean') relu = nn.ReLU() s = relu(pred_feas) t = relu(target_feas) if self.norm == 'softmax': s = F.log_softmax(s, dim=1) t = F.softmax(t, dim=1) t.detach_() loss = crit(s, t) elif self.norm == 'l2': loss = torch.sum(t / torch.sum(t) * torch.log((t / torch.sum(t) + 1e-06) / (s / torch.sum(s) + 1e-06))) else: None return None return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ModelTC/EOD
KLLoss
false
14,082
[ "Apache-2.0" ]
196
164bff80486e9ae6a095a97667b365c46ceabd86
https://github.com/ModelTC/EOD/tree/164bff80486e9ae6a095a97667b365c46ceabd86
Norm
import torch
import torch.fx


class Norm(torch.nn.Module):

    def __init__(self):
        super(Norm, self).__init__()

    def forward(self, x):
        return torch.norm(x, 2, None, False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = libdevice.sqrt(tmp4) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class NormNew(torch.nn.Module): def __init__(self): super(NormNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NVIDIA/Torch-TensorRT
Norm
false
14,083
[ "BSD-3-Clause" ]
430
1a22204fecec690bc3c2a318dab4f57b98c57f05
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
ModuleFallbackSub
import torch
import torch.nn as nn
import torch.fx


class ModuleFallbackSub(nn.Module):

    def __init__(self):
        super(ModuleFallbackSub, self).__init__()
        self.conv = nn.Conv2d(1, 3, 3)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(x))


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 3844 % 3 x0 = xindex % 3844 x3 = xindex // 3844 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x0 + 3968 * x3), tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 3, 62, 62), (11904, 3968, 62, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(46128)]( buf1, primals_2, buf2, 46128, XBLOCK=512, num_warps=4, num_stages=1 ) del primals_2 return buf1, primals_1, primals_3, buf2 class ModuleFallbackSubNew(nn.Module): def __init__(self): super(ModuleFallbackSubNew, self).__init__() self.conv = nn.Conv2d(1, 3, 3) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
NVIDIA/Torch-TensorRT
ModuleFallbackSub
false
14,084
[ "BSD-3-Clause" ]
430
1a22204fecec690bc3c2a318dab4f57b98c57f05
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
NoiseLayer
import torch
import torch.nn as nn


class NoiseLayer(nn.Module):
    """adds noise. noise is per pixel (constant over channels) with per-channel weight"""

    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(channels))
        self.noise = None

    def forward(self, x, noise=None):
        if noise is None and self.noise is None:
            noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device=x.device, dtype=x.dtype)
        elif noise is None:
            noise = self.noise
        x = x + self.weight.view(1, -1, 1, 1) * noise
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class NoiseLayerNew(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
NeuralBending/StyleCLIP
NoiseLayer
false
14,085
[ "MIT" ]
91
190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
https://github.com/NeuralBending/StyleCLIP/tree/190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
TestModel
import torch
import torch.nn as nn
import torch.fx


class TestModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.a = torch.nn.Module()
        self.b = torch.nn.Module()
        self.a.weights = torch.nn.Parameter(torch.randn(1, 2))
        self.b.weights = torch.nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x + self.a.weights + self.b.weights


def get_inputs():
    return [torch.rand([4, 4, 4, 2])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp5 = tmp2 + tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 2), (2, 1)) assert_size_stride(primals_2, (4, 4, 4, 2), (32, 8, 2, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(128)](primals_2, primals_1, primals_3, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 return buf0, class TestModelNew(nn.Module): def __init__(self): super().__init__() self.a = torch.nn.Module() self.b = torch.nn.Module() self.a.weights = torch.nn.Parameter(torch.randn(1, 2)) self.b.weights = torch.nn.Parameter(torch.randn(1)) def forward(self, input_0): primals_1 = self.a.weights primals_3 = self.b.weights primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
NVIDIA/Torch-TensorRT
TestModel
false
14,086
[ "BSD-3-Clause" ]
430
1a22204fecec690bc3c2a318dab4f57b98c57f05
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
FEM
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt


class FEM(nn.Module):

    def __init__(self, channel_size):
        super(FEM, self).__init__()
        self.cs = channel_size
        self.cpm1 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=1, stride=1, padding=1)
        self.cpm2 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=2, stride=1, padding=2)
        self.cpm3 = nn.Conv2d(256, 128, kernel_size=3, dilation=1, stride=1, padding=1)
        self.cpm4 = nn.Conv2d(256, 128, kernel_size=3, dilation=2, stride=1, padding=2)
        self.cpm5 = nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1)

    def forward(self, x):
        x1_1 = F.relu(self.cpm1(x), inplace=True)
        x1_2 = F.relu(self.cpm2(x), inplace=True)
        x2_1 = F.relu(self.cpm3(x1_2), inplace=True)
        x2_2 = F.relu(self.cpm4(x1_2), inplace=True)
        x3_1 = F.relu(self.cpm5(x2_2), inplace=True)
        return torch.cat((x1_1, x2_1, x3_1), 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 512 x0 = xindex % 16 x2 = xindex // 8192 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 4096 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 384, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr2 + (x0 + 16 * (-256 + x1) + 2048 * x2), tmp15, other=0.0) tmp17 = tl.load(in_ptr3 + (-256 + x1), tmp15, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp8, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tmp0 >= tmp13 tl.full([1], 512, tl.int64) tmp25 = tl.load(in_ptr4 + (x0 + 16 * (-384 + x1) + 2048 * x2), tmp22, other=0.0) tmp26 = tl.load(in_ptr5 + (-384 + x1), tmp22, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 + tmp26 tmp28 = triton_helpers.maximum(tmp8, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp22, tmp28, tmp29) tmp31 = tl.where(tmp15, tmp21, tmp30) tmp32 = tl.where(tmp4, tmp11, tmp31) tl.store(out_ptr0 + x3, tmp32, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, 
tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 4, 4), (4096, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1)) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf2, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 128, 4, 4), (2048, 16, 4, 1)) buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 4, 4), (2048, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_1[grid(8192)](buf5, primals_9, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 4, 4), (2048, 16, 4, 1)) buf7 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(32768)](buf0, primals_2, buf3, primals_7, buf6, primals_11, buf7, 32768, XBLOCK=256, num_warps =4, num_stages=1) buf8 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(8192)](buf6 , primals_11, buf8, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_11 buf9 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(8192)](buf3 , primals_7, buf9, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_7 buf10 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_4[grid(16384)]( buf0, primals_2, buf10, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf2, buf5, buf8, buf9, buf10) class FEMNew(nn.Module): def __init__(self, channel_size): super(FEMNew, self).__init__() self.cs = channel_size self.cpm1 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=1, stride=1, padding=1) self.cpm2 = nn.Conv2d(self.cs, 256, kernel_size=3, dilation=2, stride=1, padding=2) self.cpm3 = nn.Conv2d(256, 128, kernel_size=3, dilation=1, stride=1, padding=1) self.cpm4 = nn.Conv2d(256, 128, kernel_size=3, dilation=2, stride=1, padding=2) self.cpm5 = nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1) def forward(self, input_0): primals_1 = self.cpm1.weight primals_2 = self.cpm1.bias primals_4 = self.cpm2.weight primals_5 = self.cpm2.bias primals_6 = self.cpm3.weight primals_7 = self.cpm3.bias primals_8 = self.cpm4.weight primals_9 = self.cpm4.bias primals_10 = self.cpm5.weight primals_11 = self.cpm5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
NTech-Lab/deepfake-detection-challenge
FEM
false
14,087
[ "Apache-2.0" ]
98
52095ce4a49f298faf075a5eb28391722b9e4103
https://github.com/NTech-Lab/deepfake-detection-challenge/tree/52095ce4a49f298faf075a5eb28391722b9e4103
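A minimal usage sketch for this entry (added for illustration, not part of the dataset row): it instantiates the compiled FEMNew wrapper on CUDA and checks the concatenated output shape. The channel_size=4, the (4, 4, 4, 4) input and the (4, 512, 4, 4) result are taken from the assert_size_stride guards in call(); comparing against the eager FEM class stored in this entry's original-code field would work the same way after copying its state_dict into FEMNew.

import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4, device='cuda')    # shape taken from the primals_3 guard

m = FEMNew(channel_size=4).cuda()            # channel_size inferred from the 4-channel input
with torch.no_grad():
    y = m(x)

# triton_poi_fused_cat_2 concatenates the 256 + 128 + 128 feature maps
assert y.shape == (4, 512, 4, 4)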
Encoder
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict import torch.optim import torch._utils import torch.nn def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output def activation(act_type='swish'): if act_type == 'swish': act = swish() return act else: act = nn.ReLU(inplace=True) return act class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class swish(nn.Module): def __init__(self): super(swish, self).__init__() def forward(self, x): x = x * torch.sigmoid(x) return x class GELU(nn.Module): @staticmethod def forward(x): erf = F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))) return 0.5 * x * (1 + erf) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU): super(FeedForward, self).__init__() self.mlp1 = nn.Linear(dim, hidden_dim) self.act = activation() self.mlp2 = nn.Linear(hidden_dim, dim) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.mlp1(x) x = self.act(x) x = self.dropout(x) x = self.mlp2(x) x = self.dropout(x) return x class MultiHeadAttention(nn.Module): def __init__(self, dim, heads=8, dropout=0.1, attention_dropout=0.1, qkv_bias=True): super(MultiHeadAttention, self).__init__() assert dim % heads == 0 self.heads = heads self.scale = (dim // heads) ** -0.5 self.to_qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.to_out = nn.Linear(dim, dim, bias=True) self.dropout = nn.Dropout(dropout) self.attention_dropout = nn.Dropout(attention_dropout) def forward(self, x): B, N, C = x.shape qkv = self.to_qkv(x).reshape(B, N, 3, self.heads, C // self.heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attention_dropout(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.to_out(x) x = self.dropout(x) return x class Encoder1DBlock(nn.Module): def __init__(self, hidden_dim, mlp_dim, heads, dropout, attention_dropout, drop_path, qkv_bias, activation, norm_layer=nn. 
LayerNorm): super(Encoder1DBlock, self).__init__() self.norm1 = norm_layer(hidden_dim) self.attention = MultiHeadAttention(hidden_dim, heads, dropout, attention_dropout, qkv_bias) self.norm2 = norm_layer(hidden_dim) self.feedforward = FeedForward(hidden_dim, mlp_dim, dropout, activation=activation) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else None def forward(self, x): residual = x x = self.norm1(x) x = self.attention(x) if self.drop_path is not None: x = self.drop_path(x) x = x + residual y = self.norm2(x) y = self.feedforward(y) if self.drop_path is not None: y = self.drop_path(y) return x + y class Encoder(nn.Module): def __init__(self, hidden_dim, depth, mlp_dim, heads, dropout=0.1, attention_dropout=0.1, drop_path=0.1, qkv_bias=True, activation=GELU): super(Encoder, self).__init__() encoder_layer = OrderedDict() for d in range(depth): encoder_layer['encoder_{}'.format(d)] = Encoder1DBlock(hidden_dim, mlp_dim, heads, dropout, attention_dropout, drop_path, qkv_bias, activation) self.encoders = nn.Sequential(encoder_layer) self.encoder_norm = nn.LayerNorm(hidden_dim) def forward(self, x): x = self.encoders(x) x = self.encoder_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4, 'depth': 1, 'mlp_dim': 4, 'heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict import torch.optim import torch._utils import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex 
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask) tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 
tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp8 * tmp7 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask) tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, primals_5, buf5, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, primals_5, buf9, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf3 del primals_5 buf10 = 
empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](buf12, primals_7, primals_1, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](buf12, primals_7, primals_1, buf13, buf14, primals_8, primals_9, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_11 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_sqrt_tanh_10[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, buf12, primals_7, primals_1, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf20 = buf14 del buf14 buf21 = buf13 del buf13 triton_poi_fused_native_layer_norm_0[grid(16)](buf19, buf20, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf19, buf20, buf21, primals_14, primals_15, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf20 del buf21 del primals_15 return (buf22, primals_1, primals_7, primals_8, primals_14, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), buf19, primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4) def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. 
device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output def activation(act_type='swish'): if act_type == 'swish': act = swish() return act else: act = nn.ReLU(inplace=True) return act class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class swish(nn.Module): def __init__(self): super(swish, self).__init__() def forward(self, x): x = x * torch.sigmoid(x) return x class GELU(nn.Module): @staticmethod def forward(x): erf = F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))) return 0.5 * x * (1 + erf) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout=0.1, activation=GELU): super(FeedForward, self).__init__() self.mlp1 = nn.Linear(dim, hidden_dim) self.act = activation() self.mlp2 = nn.Linear(hidden_dim, dim) self.dropout = nn.Dropout(dropout) def forward(self, x): x = self.mlp1(x) x = self.act(x) x = self.dropout(x) x = self.mlp2(x) x = self.dropout(x) return x class MultiHeadAttention(nn.Module): def __init__(self, dim, heads=8, dropout=0.1, attention_dropout=0.1, qkv_bias=True): super(MultiHeadAttention, self).__init__() assert dim % heads == 0 self.heads = heads self.scale = (dim // heads) ** -0.5 self.to_qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.to_out = nn.Linear(dim, dim, bias=True) self.dropout = nn.Dropout(dropout) self.attention_dropout = nn.Dropout(attention_dropout) def forward(self, x): B, N, C = x.shape qkv = self.to_qkv(x).reshape(B, N, 3, self.heads, C // self.heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attention_dropout(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.to_out(x) x = self.dropout(x) return x class Encoder1DBlock(nn.Module): def __init__(self, hidden_dim, mlp_dim, heads, dropout, attention_dropout, drop_path, qkv_bias, activation, norm_layer=nn. 
LayerNorm): super(Encoder1DBlock, self).__init__() self.norm1 = norm_layer(hidden_dim) self.attention = MultiHeadAttention(hidden_dim, heads, dropout, attention_dropout, qkv_bias) self.norm2 = norm_layer(hidden_dim) self.feedforward = FeedForward(hidden_dim, mlp_dim, dropout, activation=activation) self.drop_path = DropPath(drop_path) if drop_path > 0.0 else None def forward(self, x): residual = x x = self.norm1(x) x = self.attention(x) if self.drop_path is not None: x = self.drop_path(x) x = x + residual y = self.norm2(x) y = self.feedforward(y) if self.drop_path is not None: y = self.drop_path(y) return x + y class EncoderNew(nn.Module): def __init__(self, hidden_dim, depth, mlp_dim, heads, dropout=0.1, attention_dropout=0.1, drop_path=0.1, qkv_bias=True, activation=GELU): super(EncoderNew, self).__init__() encoder_layer = OrderedDict() for d in range(depth): encoder_layer['encoder_{}'.format(d)] = Encoder1DBlock(hidden_dim, mlp_dim, heads, dropout, attention_dropout, drop_path, qkv_bias, activation) self.encoders = nn.Sequential(encoder_layer) self.encoder_norm = nn.LayerNorm(hidden_dim) def forward(self, input_0): primals_2 = self.encoders.encoder_0.norm1.weight primals_3 = self.encoders.encoder_0.norm1.bias primals_4 = self.encoders.encoder_0.attention.to_qkv.weight primals_5 = self.encoders.encoder_0.attention.to_qkv.bias primals_6 = self.encoders.encoder_0.attention.to_out.weight primals_7 = self.encoders.encoder_0.attention.to_out.bias primals_8 = self.encoders.encoder_0.norm2.weight primals_9 = self.encoders.encoder_0.norm2.bias primals_10 = self.encoders.encoder_0.feedforward.mlp1.weight primals_11 = self.encoders.encoder_0.feedforward.mlp1.bias primals_12 = self.encoders.encoder_0.feedforward.mlp2.weight primals_13 = self.encoders.encoder_0.feedforward.mlp2.bias primals_14 = self.encoder_norm.weight primals_15 = self.encoder_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
ModelTC/EOD
Encoder
false
14,089
[ "Apache-2.0" ]
196
164bff80486e9ae6a095a97667b365c46ceabd86
https://github.com/ModelTC/EOD/tree/164bff80486e9ae6a095a97667b365c46ceabd86
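A hedged parity sketch for this entry, assuming both the eager Encoder and the compiled EncoderNew above are importable from the same module and a CUDA device is available; the constructor arguments and input shape come from get_init_inputs()/get_inputs(), and the tolerances are illustrative. eval() matters here: dropout, attention dropout and drop_path all default to 0.1, but the traced call() graph contains no stochastic ops (the GELU is the tanh approximation, visible as the 0.7978845608028654 ≈ sqrt(2/pi) constant in triton_poi_fused_add_mul_pow_sqrt_tanh_10), so the two paths only agree with those layers disabled.

import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 4, device='cuda')

eager = Encoder(hidden_dim=4, depth=1, mlp_dim=4, heads=4).cuda().eval()
compiled = EncoderNew(hidden_dim=4, depth=1, mlp_dim=4, heads=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())   # parameter names are identical in both classes

with torch.no_grad():
    ref = eager(x)      # LayerNorm -> MHA -> residual -> LayerNorm -> MLP -> residual -> final norm
    out = compiled(x)   # same graph, lowered to the fused kernels in call()

torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)   # tolerances are a guess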
UpSampleLayer
import torch import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), (self. pad_le, self.pad_ri, 0, 0), mode=self.pad_mode).squeeze(2) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. Here we define another Module dicrectly on Conv1DKeepLength """ def __init__(self, feature_dim, window_len, causal=False, pad_mode= 'replicate'): super(MovingAverage, self).__init__(feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, pad_mode=pad_mode) torch_nn.init.constant_(self.weight, 1 / window_len) for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) class UpSampleLayer(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize=1, length, dim) Ouput tensor: (batchsize=1, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayer, self).__init__() self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, x): up_sampled_data = self.l_upsamp(x.permute(0, 2, 1)) return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1))) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4, 'up_sampling_factor': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = tl.load(in_ptr0 + (x1 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 4), (64, 1, 16), 0), class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), (self. pad_le, self.pad_ri, 0, 0), mode=self.pad_mode).squeeze(2) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module dicrectly on Conv1DKeepLength """ def __init__(self, feature_dim, window_len, causal=False, pad_mode= 'replicate'): super(MovingAverage, self).__init__(feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, pad_mode=pad_mode) torch_nn.init.constant_(self.weight, 1 / window_len) for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) class UpSampleLayerNew(torch_nn.Module): """ Wrapper over up-sampling Input tensor: (batchsize=1, length, dim) Ouput tensor: (batchsize=1, length * up-sampling_factor, dim) """ def __init__(self, feature_dim, up_sampling_factor, smoothing=False): super(UpSampleLayerNew, self).__init__() self.scale_factor = up_sampling_factor self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor) if smoothing: self.l_ave1 = MovingAverage(feature_dim, self.scale_factor) self.l_ave2 = MovingAverage(feature_dim, self.scale_factor) else: self.l_ave1 = torch_nn.Identity() self.l_ave2 = torch_nn.Identity() return def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Ninushkat/Impact-Synth-Hardware
UpSampleLayer
false
14,090
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
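A short check sketch for the smoothing=False path of this entry (added for illustration; shapes and constructor arguments come from get_inputs()/get_init_inputs()). Both modules are parameter-free in this configuration, so no weight copying is needed; the compiled version is a single _unsafe_index kernel whose tmp3 = x0 * 0.25 index math reproduces nearest-neighbour upsampling by a factor of 4.

import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 4, device='cuda')        # (batch, length, dim)

eager = UpSampleLayer(feature_dim=4, up_sampling_factor=4).cuda()
compiled = UpSampleLayerNew(feature_dim=4, up_sampling_factor=4).cuda()

ref = eager(x)        # permute -> nn.Upsample(scale_factor=4) -> permute
out = compiled(x)     # fused index kernel + reinterpret_tensor for the final permute

assert out.shape == (4, 16, 4)                # length axis scaled by the up-sampling factor
torch.testing.assert_close(out, ref)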
TimeVarFIRFilter
import torch import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func class TimeVarFIRFilter(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \\sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilter, self).__init__() def forward(self, signal, f_coef): """ Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) Output: (batchsize=1, signal_length, 1) For n in [1, sequence_length): output(0, n, 1)= \\sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) This method may be not efficient: Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K] output [y_1, y_2, y_3, ..., y_N, *, * ... *] = a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] + a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0] """ signal_l = signal.shape[1] order_k = f_coef.shape[-1] padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1)) y = torch.zeros_like(signal) for k in range(order_k): y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, : ] * f_coef[:, :, k:k + 1] return y def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as torch_nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp4 = tl.load(in_ptr1 + 4 * x4, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + x3, tmp2 & xmask, other=0.0) tmp5 = tmp3 * tmp4 tmp6 = (6 + x1) % 7 tmp7 = tmp6 < tmp1 tmp8 = tl.load(in_ptr0 + (x0 + 4 * ((6 + x1) % 7) + 16 * x2), tmp7 & xmask, other=0.0) tmp10 = tmp8 * tmp9 tmp11 = tmp5 + tmp10 tmp12 = (5 + x1) % 7 tmp13 = tmp12 < tmp1 tmp14 = tl.load(in_ptr0 + (x0 + 4 * ((5 + x1) % 7) + 16 * x2), tmp13 & xmask, other=0.0) tmp16 = tmp14 * tmp15 tmp17 = tmp11 + tmp16 tmp18 = (4 + x1) % 7 tmp19 = tmp18 < tmp1 tmp20 = tl.load(in_ptr0 + (x0 + 4 * ((4 + x1) % 7) + 16 * x2), tmp19 & xmask, other=0.0) tmp22 = tmp20 * tmp21 tmp23 = tmp17 + tmp22 tl.store(in_out_ptr0 + x3, tmp23, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class TimeVarFIRFilterNew(torch_nn.Module): """ TimeVarFIRFilter Given sequences of filter coefficients and a signal, do filtering Filter coefs: (batchsize=1, signal_length, filter_order = K) Signal: (batchsize=1, signal_length, 1) For batch 0: For n in [1, sequence_length): output(0, n, 1) = \\sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k) Note: filter coef (0, n, :) is only used to compute the output at (0, n, 1) """ def __init__(self): super(TimeVarFIRFilterNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Ninushkat/Impact-Synth-Hardware
TimeVarFIRFilter
false
14,091
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
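A hedged numerical check for this entry: the docstring's output(0, n, 1) = sum_k signal(0, n-k, 1) * coef(0, n, k) is unrolled for K=4 inside triton_poi_fused_add_mul_0, with the (6 + x1) % 7 style index arithmetic standing in for torch.roll on the zero-padded signal. The sketch below (an illustration, not part of the row) compares the fused kernel against the eager roll-and-accumulate loop on the (4, 4, 4) inputs from get_inputs(), assuming a CUDA device.

import torch

torch.manual_seed(0)
signal = torch.rand(4, 4, 4, device='cuda')   # (batch, length, channels)
coefs = torch.rand(4, 4, 4, device='cuda')    # (batch, length, K=4 filter taps)

ref = TimeVarFIRFilter()(signal, coefs)       # pad + K torch.roll multiply-accumulates
out = TimeVarFIRFilterNew()(signal, coefs)    # one fused Triton kernel

torch.testing.assert_close(out, ref)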
ModuleFallbackMain
import torch import torch.nn as nn import torch.fx class ModuleFallbackSub(nn.Module): def __init__(self): super(ModuleFallbackSub, self).__init__() self.conv = nn.Conv2d(1, 3, 3) self.relu = nn.ReLU() def forward(self, x): return self.relu(self.conv(x)) class ModuleFallbackMain(nn.Module): def __init__(self): super(ModuleFallbackMain, self).__init__() self.layer1 = ModuleFallbackSub() self.conv = nn.Conv2d(3, 6, 3) self.relu = nn.ReLU() def forward(self, x): return self.relu(self.conv(self.layer1(x))) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.fx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 3600 % 6 x0 = xindex % 3600 x3 = xindex // 3600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x0 + 3712 * x3), tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (6, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (6,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(46128)](buf1, primals_2, 46128, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 6, 60, 60), (21600, 3600, 60, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 6, 60, 60), (22272, 3712, 60, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(86400)]( buf3, primals_5, buf4, 86400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf4 class ModuleFallbackSub(nn.Module): def __init__(self): super(ModuleFallbackSub, self).__init__() self.conv = nn.Conv2d(1, 3, 3) self.relu = nn.ReLU() def forward(self, x): return self.relu(self.conv(x)) class ModuleFallbackMainNew(nn.Module): def __init__(self): super(ModuleFallbackMainNew, self).__init__() self.layer1 = ModuleFallbackSub() self.conv = nn.Conv2d(3, 6, 3) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.layer1.conv.weight primals_2 = self.layer1.conv.bias primals_4 = self.conv.weight primals_5 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5]) return output[0]
NVIDIA/Torch-TensorRT
ModuleFallbackMain
false
14,092
[ "BSD-3-Clause" ]
430
1a22204fecec690bc3c2a318dab4f57b98c57f05
https://github.com/NVIDIA/Torch-TensorRT/tree/1a22204fecec690bc3c2a318dab4f57b98c57f05
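A minimal parity sketch for this entry, assuming both classes above are in scope and CUDA is available; the (4, 1, 64, 64) input comes from get_inputs(), the (4, 6, 60, 60) output shape from the assert_size_stride on buf2, and the tolerances are illustrative.

import torch

torch.manual_seed(0)
x = torch.rand(4, 1, 64, 64, device='cuda')

eager = ModuleFallbackMain().cuda().eval()
compiled = ModuleFallbackMainNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())   # layer1.conv.* and conv.* keys match

with torch.no_grad():
    ref = eager(x)      # Conv2d(1, 3, 3) + ReLU, then Conv2d(3, 6, 3) + ReLU
    out = compiled(x)   # extern convolutions + fused bias/ReLU Triton kernels

assert out.shape == (4, 6, 60, 60)
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)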
Decoder5
import torch import torch.nn as nn class Decoder5(nn.Module): def __init__(self, model=None, fixed=False): super(Decoder5, self).__init__() self.fixed = fixed self.conv51 = nn.Conv2d(512, 512, 3, 1, 0) self.conv44 = nn.Conv2d(512, 512, 3, 1, 0) self.conv43 = nn.Conv2d(512, 512, 3, 1, 0) self.conv42 = nn.Conv2d(512, 512, 3, 1, 0) self.conv41 = nn.Conv2d(512, 256, 3, 1, 0) self.conv34 = nn.Conv2d(256, 256, 3, 1, 0) self.conv33 = nn.Conv2d(256, 256, 3, 1, 0) self.conv32 = nn.Conv2d(256, 256, 3, 1, 0) self.conv31 = nn.Conv2d(256, 128, 3, 1, 0) self.conv22 = nn.Conv2d(128, 128, 3, 1, 0) self.conv21 = nn.Conv2d(128, 64, 3, 1, 0) self.conv12 = nn.Conv2d(64, 64, 3, 1, 0) self.conv11 = nn.Conv2d(64, 3, 3, 1, 0) self.relu = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) if model: assert os.path.splitext(model)[1] in {'.t7', '.pth'} if model.endswith('.t7'): t7_model = load_lua(model) load_param(t7_model, 1, self.conv51) load_param(t7_model, 5, self.conv44) load_param(t7_model, 8, self.conv43) load_param(t7_model, 11, self.conv42) load_param(t7_model, 14, self.conv41) load_param(t7_model, 18, self.conv34) load_param(t7_model, 21, self.conv33) load_param(t7_model, 24, self.conv32) load_param(t7_model, 27, self.conv31) load_param(t7_model, 31, self.conv22) load_param(t7_model, 34, self.conv21) load_param(t7_model, 38, self.conv12) load_param(t7_model, 41, self.conv11) else: self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage)) if fixed: for param in self.parameters(): param.requires_grad = False def forward(self, input): y = self.relu(self.conv51(self.pad(input))) y = self.unpool(y) y = self.relu(self.conv44(self.pad(y))) y = self.relu(self.conv43(self.pad(y))) y = self.relu(self.conv42(self.pad(y))) y = self.relu(self.conv41(self.pad(y))) y = self.unpool(y) y = self.relu(self.conv34(self.pad(y))) y = self.relu(self.conv33(self.pad(y))) y = self.relu(self.conv32(self.pad(y))) y = self.relu(self.conv31(self.pad(y))) y = self.unpool(y) y = self.relu(self.conv22(self.pad(y))) y = self.relu(self.conv21(self.pad(y))) y = self.unpool(y) y = self.relu(self.conv12(self.pad(y))) y = self.relu(self.conv11(self.pad(y))) return y def forward_branch(self, input): out51 = self.relu(self.conv51(self.pad(input))) out51 = self.unpool(out51) out44 = self.relu(self.conv44(self.pad(out51))) out43 = self.relu(self.conv43(self.pad(out44))) out42 = self.relu(self.conv42(self.pad(out43))) out41 = self.relu(self.conv41(self.pad(out42))) out41 = self.unpool(out41) out34 = self.relu(self.conv34(self.pad(out41))) out33 = self.relu(self.conv33(self.pad(out34))) out32 = self.relu(self.conv32(self.pad(out33))) out31 = self.relu(self.conv31(self.pad(out32))) out31 = self.unpool(out31) out22 = self.relu(self.conv22(self.pad(out31))) out21 = self.relu(self.conv21(self.pad(out22))) out21 = self.unpool(out21) out12 = self.relu(self.conv12(self.pad(out21))) out11 = self.relu(self.conv11(self.pad(out12))) return out51, out41, out31, out21, out11 def get_inputs(): return [torch.rand([4, 512, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 10 % 10 x0 = xindex % 10 x4 = xindex // 100 x2 = xindex // 100 % 512 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1 ))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0 ))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 512 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, 
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 18 % 18 x0 = xindex % 18 x4 = xindex // 324 x2 = xindex // 324 % 256 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 34 x1 = xindex // 34 % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 
1024 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused_arange_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12(in_ptr0 , in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x4), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, 
None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def 
triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0, 73728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1)) buf2 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK =8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid (204800)](buf2, buf1, primals_3, buf3, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1)) buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf4 , primals_5, buf5, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 8, 8), (32768, 64, 8, 1)) buf7 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf6 , primals_7, buf7, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 512, 8, 8), (32768, 64, 8, 1)) buf9 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf8 , primals_9, buf9, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 8, 8), (16384, 64, 8, 1)) buf11 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid (331776)](buf11, buf10, primals_11, buf12, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 256, 16, 16), (65536, 256, 16, 1)) buf14 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf13, primals_13, buf14, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 256, 16, 1)) buf16 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf15, primals_15, 
buf16, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 16, 16), (65536, 256, 16, 1)) buf18 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf17, primals_17, buf18, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf20, 32, XBLOCK=32, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid (591872)](buf20, buf19, primals_19, buf21, 591872, XBLOCK=1024, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf23 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(591872)]( buf22, primals_21, buf23, 591872, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf25 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_10[grid(64)](buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_11[grid(64)](buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12[ grid(1115136)](buf26, buf24, primals_23, buf27, 1115136, XBLOCK =1024, num_warps=4, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf29 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_13[grid(1115136)]( buf28, primals_25, buf29, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf31 = buf30 del buf30 buf32 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_14[grid(49152)]( buf31, primals_27, buf32, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf33 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_15[grid(1048576)]( buf28, primals_25, buf33, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf28 del 
primals_25 buf34 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_16[grid(262144)]( buf24, primals_23, buf34, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf24 del primals_23 buf35 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_17[grid(524288)]( buf22, primals_21, buf35, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf22 del primals_21 buf36 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_18[grid(131072)]( buf19, primals_19, buf36, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf19 del primals_19 buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf17, primals_17, buf37, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf17 del primals_17 buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf15, primals_15, buf38, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf15 del primals_15 buf39 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf13, primals_13, buf39, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf13 del primals_13 buf40 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_20[grid(65536)]( buf10, primals_11, buf40, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf10 del primals_11 buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf8, primals_9, buf41, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf8 del primals_9 buf42 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf6, primals_7, buf42, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf6 del primals_7 buf43 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf4, primals_5, buf43, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf4 del primals_5 buf44 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_22[grid(32768)]( buf1, primals_3, buf44, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return (buf31, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf18, buf20, buf21, buf23, buf25, buf26, buf27, buf29, buf32, buf33, buf34, buf35, buf36, buf37, buf38, buf39, buf40, buf41, buf42, buf43, buf44) class Decoder5New(nn.Module): def __init__(self, model=None, fixed=False): super(Decoder5New, self).__init__() self.fixed = fixed self.conv51 = nn.Conv2d(512, 512, 3, 1, 0) self.conv44 = nn.Conv2d(512, 512, 3, 1, 0) self.conv43 = nn.Conv2d(512, 512, 3, 1, 0) self.conv42 = nn.Conv2d(512, 512, 3, 1, 0) self.conv41 = nn.Conv2d(512, 256, 3, 1, 0) self.conv34 = nn.Conv2d(256, 256, 3, 1, 0) self.conv33 = nn.Conv2d(256, 256, 3, 1, 0) self.conv32 = nn.Conv2d(256, 256, 3, 1, 0) 
self.conv31 = nn.Conv2d(256, 128, 3, 1, 0) self.conv22 = nn.Conv2d(128, 128, 3, 1, 0) self.conv21 = nn.Conv2d(128, 64, 3, 1, 0) self.conv12 = nn.Conv2d(64, 64, 3, 1, 0) self.conv11 = nn.Conv2d(64, 3, 3, 1, 0) self.relu = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) if model: assert os.path.splitext(model)[1] in {'.t7', '.pth'} if model.endswith('.t7'): t7_model = load_lua(model) load_param(t7_model, 1, self.conv51) load_param(t7_model, 5, self.conv44) load_param(t7_model, 8, self.conv43) load_param(t7_model, 11, self.conv42) load_param(t7_model, 14, self.conv41) load_param(t7_model, 18, self.conv34) load_param(t7_model, 21, self.conv33) load_param(t7_model, 24, self.conv32) load_param(t7_model, 27, self.conv31) load_param(t7_model, 31, self.conv22) load_param(t7_model, 34, self.conv21) load_param(t7_model, 38, self.conv12) load_param(t7_model, 41, self.conv11) else: self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage)) if fixed: for param in self.parameters(): param.requires_grad = False def forward_branch(self, input): out51 = self.relu(self.conv51(self.pad(input))) out51 = self.unpool(out51) out44 = self.relu(self.conv44(self.pad(out51))) out43 = self.relu(self.conv43(self.pad(out44))) out42 = self.relu(self.conv42(self.pad(out43))) out41 = self.relu(self.conv41(self.pad(out42))) out41 = self.unpool(out41) out34 = self.relu(self.conv34(self.pad(out41))) out33 = self.relu(self.conv33(self.pad(out34))) out32 = self.relu(self.conv32(self.pad(out33))) out31 = self.relu(self.conv31(self.pad(out32))) out31 = self.unpool(out31) out22 = self.relu(self.conv22(self.pad(out31))) out21 = self.relu(self.conv21(self.pad(out22))) out21 = self.unpool(out21) out12 = self.relu(self.conv12(self.pad(out21))) out11 = self.relu(self.conv11(self.pad(out12))) return out51, out41, out31, out21, out11 def forward(self, input_0): primals_2 = self.conv51.weight primals_3 = self.conv51.bias primals_4 = self.conv44.weight primals_5 = self.conv44.bias primals_6 = self.conv43.weight primals_7 = self.conv43.bias primals_8 = self.conv42.weight primals_9 = self.conv42.bias primals_10 = self.conv41.weight primals_11 = self.conv41.bias primals_12 = self.conv34.weight primals_13 = self.conv34.bias primals_14 = self.conv33.weight primals_15 = self.conv33.bias primals_16 = self.conv32.weight primals_17 = self.conv32.bias primals_18 = self.conv31.weight primals_19 = self.conv31.bias primals_20 = self.conv22.weight primals_21 = self.conv22.bias primals_22 = self.conv21.weight primals_23 = self.conv21.bias primals_24 = self.conv12.weight primals_25 = self.conv12.bias primals_26 = self.conv11.weight primals_27 = self.conv11.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
MingSun-Tse/Collaborative-Distillation
Decoder5
false
14093
[ "MIT" ]
172
915712674af82ff91d926d922c14988cce0430f3
https://github.com/MingSun-Tse/Collaborative-Distillation/tree/915712674af82ff91d926d922c14988cce0430f3
MyLinear
import torch import torch.nn as nn import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MyLinearNew(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
NeuralBending/StyleCLIP
MyLinear
false
14094
[ "MIT" ]
91
190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
https://github.com/NeuralBending/StyleCLIP/tree/190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
encoderDepth
import torch import torch.nn as nn import torch.nn.functional as F class encoderDepth(nn.Module): def __init__(self): super(encoderDepth, self).__init__() self.conv1 = nn.Conv2d(in_channels=13, out_channels=64, kernel_size =4, stride=2, padding=1, bias=True) self.gn1 = nn.GroupNorm(4, 64) self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size =3, stride=1, padding=1, bias=True) self.gn2 = nn.GroupNorm(4, 64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=256, kernel_size=4, stride=2, padding=1, bias=True) self.gn3 = nn.GroupNorm(16, 256) self.conv4 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True) self.gn4 = nn.GroupNorm(16, 256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1, bias=True) self.gn5 = nn.GroupNorm(32, 512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True) self.gn6 = nn.GroupNorm(32, 512) def forward(self, x): x1 = F.relu(self.gn1(self.conv1(x)), True) x2 = F.relu(self.gn2(self.conv2(x1)), True) x3 = F.relu(self.gn3(self.conv3(x2)), True) x4 = F.relu(self.gn4(self.conv4(x3)), True) x5 = F.relu(self.gn5(self.conv5(x4)), True) x = F.relu(self.gn6(self.conv6(x5)), True) return x def get_inputs(): return [torch.rand([4, 13, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 832 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 13 y1 = yindex // 13 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 13 * x2 + 208 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 52 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 13 y1 = yindex // 13 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 13 * x2 + 53248 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 8 x1 = xindex // 8 % 64 x2 = xindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + (8 * x0 + 64 * ((r3 + 128 * x1) % 1024) + 65536 * x2 + (r3 + 128 * x1) // 1024), None, eviction_policy= 'evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 128, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) tl.store(out_ptr2 + x4, tmp7, None) @triton.jit def triton_per_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 8 x1 = xindex // 8 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, 
None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 2 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 2 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 16384.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (4 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (4 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16384.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_red_fused_native_group_norm_13(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 64 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 16 x1 = xindex // 16 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 256 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (16 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (16 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_16(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = 
tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_native_group_norm_relu_threshold_backward_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y3 // 16, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y3 // 16, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + y0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1, 1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp16 = 0.0 tmp17 = tmp15 <= tmp16 tl.store(out_ptr0 + (x2 + 64 * y3), tmp15, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 32768 * y1), tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (64, 13, 4, 4), (208, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 13, 64, 64), (53248, 4096, 64, 1)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64,), (1,)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (256, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256,), (1,)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512,), (1,)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512,), (1,)) assert_size_stride(primals_25, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 13, 4, 4), (208, 1, 52, 13), torch. 
float32) get_raw_stream(0) triton_poi_fused_0[grid(832, 16)](primals_1, buf0, 832, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 13, 64, 64), (53248, 1, 832, 13), torch.float32) triton_poi_fused_1[grid(52, 4096)](primals_3, buf1, 52, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_6, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((256, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_3[grid(16384, 16)](primals_10, buf3, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_4[grid(65536, 9)](primals_14, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf5 = empty_strided_cuda((512, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_5[grid(131072, 16)](primals_18, buf5, 131072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_6[grid(262144, 9)](primals_22, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf7 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf8 = buf7 del buf7 triton_poi_fused_convolution_7[grid(262144)](buf8, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf9 = empty_strided_cuda((4, 4, 1, 1, 2, 64), (512, 2, 2048, 2048, 1, 8), torch.float32) buf10 = empty_strided_cuda((4, 4, 1, 1, 2, 64), (512, 2, 2048, 2048, 1, 8), torch.float32) buf11 = empty_strided_cuda((4, 4, 1, 1, 2, 64), (512, 2, 2048, 2048, 1, 8), torch.float32) triton_per_fused_native_group_norm_8[grid(2048)](buf8, buf9, buf10, buf11, 2048, 128, XBLOCK=8, num_warps=8, num_stages=1) buf12 = empty_strided_cuda((4, 4, 1, 1, 2), (8, 2, 32, 32, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 1, 1, 2), (8, 2, 32, 32, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 1, 1, 2), (8, 2, 32, 32, 1), torch.float32) triton_per_fused_native_group_norm_9[grid(32)](buf9, buf10, buf11, buf12, buf13, buf14, 32, 64, XBLOCK=1, num_warps=2, num_stages=1) buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf16 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_native_group_norm_10[grid(16)](buf12, buf13, buf14, buf15, buf16, buf18, 16, 2, XBLOCK=1, num_warps=2, num_stages=1) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) triton_poi_fused_native_group_norm_relu_11[grid(262144)](buf8, buf15, buf16, primals_4, primals_5, buf19, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf21 = buf20 del buf20 triton_poi_fused_convolution_7[grid(262144)](buf21, primals_7, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf22 = buf9 del buf9 buf23 = buf11 del buf11 buf24 = buf10 del 
buf10 triton_per_fused_native_group_norm_8[grid(2048)](buf21, buf22, buf23, buf24, 2048, 128, XBLOCK=8, num_warps=8, num_stages=1) buf25 = buf14 del buf14 buf26 = buf13 del buf13 buf27 = buf12 del buf12 triton_per_fused_native_group_norm_9[grid(32)](buf22, buf23, buf24, buf25, buf26, buf27, 32, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf22 del buf23 del buf24 buf28 = buf16 del buf16 buf29 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf31 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_native_group_norm_10[grid(16)](buf25, buf26, buf27, buf28, buf29, buf31, 16, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf25 del buf26 del buf27 buf32 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) triton_poi_fused_native_group_norm_relu_11[grid(262144)](buf21, buf28, buf29, primals_8, primals_9, buf32, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf29 del primals_9 buf33 = extern_kernels.convolution(buf32, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf34 = buf33 del buf33 triton_poi_fused_convolution_12[grid(262144)](buf34, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf35 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) buf36 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) buf38 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) triton_red_fused_native_group_norm_13[grid(64)](buf34, buf35, buf36, buf38, 64, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf39 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_14[grid(262144)](buf34, buf35, buf36, primals_12, primals_13, buf39, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_13 buf40 = extern_kernels.convolution(buf39, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf41 = buf40 del buf40 triton_poi_fused_convolution_12[grid(262144)](buf41, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf42 = buf36 del buf36 buf43 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) buf45 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) triton_red_fused_native_group_norm_13[grid(64)](buf41, buf42, buf43, buf45, 64, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf46 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_14[grid(262144)](buf41, buf42, buf43, primals_16, primals_17, buf46, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del buf43 del primals_17 buf47 = extern_kernels.convolution(buf46, buf5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf48 = buf47 del buf47 triton_poi_fused_convolution_15[grid(131072)](buf48, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf49 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf50 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf52 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_per_fused_native_group_norm_16[grid(128)](buf48, buf49, buf50, buf52, 128, 1024, num_warps=8, num_stages=1) buf53 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_17[grid(131072)](buf48, buf49, buf50, primals_20, primals_21, buf53, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf54 = extern_kernels.convolution(buf53, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf55 = buf54 del buf54 triton_poi_fused_convolution_15[grid(131072)](buf55, primals_23, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_23 buf56 = buf50 del buf50 buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf59 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_per_fused_native_group_norm_16[grid(128)](buf55, buf56, buf57, buf59, 128, 1024, num_warps=8, num_stages=1) buf60 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf61 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_native_group_norm_relu_threshold_backward_18[grid( 2048, 64)](buf55, buf56, buf57, primals_24, primals_25, buf60, buf61, 2048, 64, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf57 del primals_25 return (buf60, buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf5, primals_20, buf6, primals_24, buf8, reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor( buf18, (4, 4), (4, 1), 0), buf19, buf21, reinterpret_tensor(buf28, (4, 4), (4, 1), 0), reinterpret_tensor(buf31, (4, 4), (4, 1), 0), buf32, buf34, reinterpret_tensor(buf35, (4, 16), (16, 1), 0), reinterpret_tensor(buf38, (4, 16), (16, 1), 0), buf39, buf41, reinterpret_tensor(buf42, (4, 16), (16, 1), 0), reinterpret_tensor( buf45, (4, 16), (16, 1), 0), buf46, buf48, reinterpret_tensor(buf49, (4, 32), (32, 1), 0), reinterpret_tensor(buf52, (4, 32), (32, 1), 0 ), buf53, buf55, reinterpret_tensor(buf56, (4, 32), (32, 1), 0), reinterpret_tensor(buf59, (4, 32), (32, 1), 0), buf61) class encoderDepthNew(nn.Module): def __init__(self): super(encoderDepthNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=13, out_channels=64, kernel_size =4, stride=2, padding=1, bias=True) self.gn1 = nn.GroupNorm(4, 64) self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size =3, stride=1, padding=1, bias=True) self.gn2 = nn.GroupNorm(4, 64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=256, kernel_size=4, stride=2, padding=1, bias=True) self.gn3 = nn.GroupNorm(16, 256) self.conv4 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=True) self.gn4 = nn.GroupNorm(16, 256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1, bias=True) self.gn5 = nn.GroupNorm(32, 512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=True) self.gn6 = nn.GroupNorm(32, 512) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.gn1.weight primals_5 = self.gn1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.gn2.weight primals_9 = self.gn2.bias primals_10 = self.conv3.weight primals_11 = self.conv3.bias primals_12 = self.gn3.weight primals_13 = self.gn3.bias primals_14 = self.conv4.weight primals_15 = self.conv4.bias 
primals_16 = self.gn4.weight primals_17 = self.gn4.bias primals_18 = self.conv5.weight primals_19 = self.conv5.bias primals_20 = self.gn5.weight primals_21 = self.gn5.bias primals_22 = self.conv6.weight primals_23 = self.conv6.bias primals_24 = self.gn6.weight primals_25 = self.gn6.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0]
Miles629/TransparentShapeRealData
encoderDepth
false
14095
[ "MIT" ]
91
b81098a2d1882f5fd33fba6167d7258dbe02d6d2
https://github.com/Miles629/TransparentShapeRealData/tree/b81098a2d1882f5fd33fba6167d7258dbe02d6d2
StyleMod
import torch import torch.nn as nn import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleMod(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleMod, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, x, latent): style = self.lin(latent) shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1] style = style.view(shape) x = x * (style[:, 0] + 1.0) + style[:, 1] return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp0 * tmp6 tmp10 = tmp9 * tmp3 tmp11 = tmp8 + tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x3, tmp12, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8,), (1,)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1, buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_1 return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleModNew(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleModNew, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, input_0, input_1): 
primals_2 = self.lin.weight primals_1 = self.lin.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
NeuralBending/StyleCLIP
StyleMod
false
14,096
[ "MIT" ]
91
190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
https://github.com/NeuralBending/StyleCLIP/tree/190d3a0d48823ccdbdd15c7f8af6e08703a6dbd8
MovingAverage
import torch import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), (self. pad_le, self.pad_ri, 0, 0), mode=self.pad_mode).squeeze(2) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) class MovingAverage(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. Here we define another Module directly on Conv1dKeepLength """ def __init__(self, feature_dim, window_len, causal=False, pad_mode= 'replicate'): super(MovingAverage, self).__init__(feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, pad_mode=pad_mode) torch_nn.init.constant_(self.weight, 1 / window_len) for p in self.parameters(): p.requires_grad = False def forward(self, data): return super(MovingAverage, self).forward(data) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4, 'window_len': 4}]
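A minimal usage sketch (an addition, not part of the record; assumes the MovingAverage class above is in scope). Because the kernel weights are fixed to 1/window_len and the padding mode is 'replicate', a constant sequence passes through unchanged:

import torch

smoother = MovingAverage(feature_dim=4, window_len=4)
x = torch.full((1, 6, 4), 5.0)        # (batch, length, dim), constant signal
y = smoother(x)
print(y.shape)                        # torch.Size([1, 6, 4]): length is preserved by Conv1dKeepLength
print(torch.allclose(y, x))           # True: the moving average of a constant is the constant itself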
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_replication_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 % 4 x2 = xindex // 28 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * (3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0) < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 7), (28, 7, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad2d_0[grid(112)](arg0_1, buf0, 112, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 7 ), (28, 7, 1), 0), arg1_1, stride=(1,), padding=(0,), dilation= (1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) del arg1_1 del buf0 return reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), (self. pad_le, self.pad_ri, 0, 0), mode=self.pad_mode).squeeze(2) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) class MovingAverageNew(Conv1dKeepLength): """ Wrapper to define a moving average smoothing layer Note: MovingAverage can be implemented using TimeInvFIRFilter too. 
Here we define another Module directly on Conv1dKeepLength """ def __init__(self, feature_dim, window_len, causal=False, pad_mode= 'replicate'): super(MovingAverageNew, self).__init__(feature_dim, feature_dim, 1, window_len, causal, groups=feature_dim, bias=False, tanh=False, pad_mode=pad_mode) torch_nn.init.constant_(self.weight, 1 / window_len) for p in self.parameters(): p.requires_grad = False def forward(self, input_0): arg1_1 = self.weight arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
Ninushkat/Impact-Synth-Hardware
MovingAverage
false
14,097
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
FlowEntropy
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class FlowEntropy(nn.Module): """ Computes entropy from matching cost """ def __init__(self): super(FlowEntropy, self).__init__() def forward(self, x): """ Performs forward pass. Parameters ---------- x : torch.Tensor A tensor of shape B x U x V x H x W representing the cost Returns ------- torch.Tensor A tensor of shape B x 1 x H x W """ x = torch.squeeze(x, 1) B, U, V, H, W = x.shape x = x.view(B, -1, H, W) x = F.softmax(x, dim=1).view(B, U, V, H, W) global_entropy = (-x * torch.clamp(x, 1e-09, 1 - 1e-09).log()).sum(1 ).sum(1)[:, np.newaxis] global_entropy /= np.log(x.shape[1] * x.shape[2]) return global_entropy def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
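A small worked example (an addition, not part of the record; assumes the FlowEntropy class above is in scope): for a constant cost volume the softmax is uniform over the U*V displacement bins, so the normalized entropy is exactly 1.

import torch

entropy = FlowEntropy()
cost = torch.zeros(2, 4, 4, 8, 8)     # B x U x V x H x W, identical costs everywhere
out = entropy(cost)
print(out.shape)                      # torch.Size([2, 1, 8, 8])
print(out.mean().item())              # ~1.0, i.e. log(U*V) / log(U*V)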
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 256 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_clamp_log_mul_neg_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 x3 = xindex % 64 x0 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 256 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (64 + x3 + 256 * x2), xmask) tmp23 = tl.load(in_ptr0 + (128 + x3 + 256 * x2), xmask) tmp33 = tl.load(in_ptr0 + (192 + x3 + 256 * x2), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 / tmp4 tmp6 = -tmp5 tmp7 = 1e-09 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp9 = 0.999999999 tmp10 = triton_helpers.minimum(tmp8, tmp9) tmp11 = tl_math.log(tmp10) tmp12 = tmp6 * tmp11 tmp14 = tmp13 - tmp1 tmp15 = tl_math.exp(tmp14) tmp16 = tmp15 / tmp4 tmp17 = -tmp16 tmp18 = triton_helpers.maximum(tmp16, tmp7) tmp19 = triton_helpers.minimum(tmp18, tmp9) tmp20 = tl_math.log(tmp19) tmp21 = tmp17 * tmp20 tmp22 = tmp12 + tmp21 tmp24 = tmp23 - tmp1 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp4 tmp27 = -tmp26 tmp28 = triton_helpers.maximum(tmp26, tmp7) tmp29 = triton_helpers.minimum(tmp28, tmp9) tmp30 = tl_math.log(tmp29) tmp31 = tmp27 * tmp30 tmp32 = tmp22 + tmp31 tmp34 = tmp33 - tmp1 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 / tmp4 tmp37 = -tmp36 tmp38 = triton_helpers.maximum(tmp36, tmp7) tmp39 = triton_helpers.minimum(tmp38, tmp9) tmp40 = tl_math.log(tmp39) tmp41 = tmp37 * tmp40 tmp42 = tmp32 + tmp41 tl.store(out_ptr0 + x4, tmp42, xmask) @triton.jit def triton_poi_fused_div_log_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + 
tmp5 tmp7 = 2.772588722239781 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(64)](arg0_1, buf0, buf1, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_log_mul_neg_sum_1[grid(256)](arg0_1, buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf0 buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0) del buf1 triton_poi_fused_div_log_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0), class FlowEntropyNew(nn.Module): """ Computes entropy from matching cost """ def __init__(self): super(FlowEntropyNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NeelayS/ezflow
FlowEntropy
false
14,098
[ "MIT" ]
94
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
https://github.com/NeelayS/ezflow/tree/b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
Depth_Pointwise_Conv1d
import torch from torch import nn class Depth_Pointwise_Conv1d(nn.Module): def __init__(self, in_ch, out_ch, k): super().__init__() if k == 1: self.depth_conv = nn.Identity() else: self.depth_conv = nn.Conv1d(in_channels=in_ch, out_channels= in_ch, kernel_size=k, groups=in_ch, padding=k // 2) self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= out_ch, kernel_size=1, groups=1) def forward(self, x): out = self.pointwise_conv(self.depth_conv(x)) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4, 'k': 4}]
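A minimal usage sketch (an addition, not part of the record; assumes the class above is in scope). Note that with the even kernel size from get_init_inputs (k=4) and padding=k//2, the output is one step longer than the input:

import torch

conv = Depth_Pointwise_Conv1d(in_ch=4, out_ch=4, k=4)
x = torch.rand(2, 4, 10)              # (batch, channels, length)
y = conv(x)
print(y.shape)                        # torch.Size([2, 4, 11]): 10 + 2*(4//2) - (4 - 1) = 11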
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=4, bias=None) assert_size_stride(buf0, (1, 4, 5), (20, 5, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(20)](buf1, primals_2, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 5 ), (0, 5, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 5), (20, 5, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(20)](buf3, primals_5, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 5), (5, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), ( 16, 4, 1), 0), buf1 class Depth_Pointwise_Conv1dNew(nn.Module): def __init__(self, in_ch, out_ch, k): super().__init__() if k == 1: self.depth_conv = nn.Identity() else: self.depth_conv = nn.Conv1d(in_channels=in_ch, out_channels= in_ch, kernel_size=k, groups=in_ch, padding=k // 2) self.pointwise_conv = nn.Conv1d(in_channels=in_ch, out_channels= out_ch, kernel_size=1, groups=1) def forward(self, input_0): primals_1 = self.depth_conv.weight primals_2 = self.depth_conv.bias primals_4 = self.pointwise_conv.weight primals_5 = self.pointwise_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Nitin-Mane/External-Attention-pytorch
Depth_Pointwise_Conv1d
false
14,099
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
FocalLossSigmoid
import torch import torch.nn as nn from math import sqrt as sqrt from itertools import product as product class FocalLossSigmoid(nn.Module): """ sigmoid version focal loss """ def __init__(self, alpha=0.25, gamma=2, size_average=False): super(FocalLossSigmoid, self).__init__() self.alpha = alpha self.gamma = gamma self.size_average = size_average def forward(self, inputs, targets): inputs.size(0) inputs.size(1) P = torch.sigmoid(inputs) alpha_mask = self.alpha * targets loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P ) * targets * alpha_mask loss_neg = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(1 - P) * ( 1 - targets) * (1 - alpha_mask) batch_loss = loss_neg + loss_pos if self.size_average: loss = batch_loss.mean() else: loss = batch_loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
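A minimal usage sketch (an addition, not part of the record; assumes the FocalLossSigmoid class above is in scope). With size_average=False the loss is summed over every element:

import torch

criterion = FocalLossSigmoid(alpha=0.25, gamma=2, size_average=False)
logits = torch.randn(4, 4, 4, 4)
targets = (torch.rand(4, 4, 4, 4) > 0.5).float()   # hard 0/1 labels
loss = criterion(logits, targets)
print(loss.item())                                  # scalar: sum of the per-element focal terms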
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from math import sqrt as sqrt from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = tmp3 * tmp3 tmp5 = -1.0 tmp6 = tmp4 * tmp5 tmp7 = tl_math.log(tmp3) tmp8 = tmp6 * tmp7 tmp10 = tmp2 - tmp9 tmp11 = tmp8 * tmp10 tmp12 = 0.25 tmp13 = tmp9 * tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp11 * tmp14 tmp16 = tl_math.log(tmp1) tmp17 = tmp6 * tmp16 tmp18 = tmp17 * tmp9 tmp19 = tmp18 * tmp13 tmp20 = tmp15 + tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class FocalLossSigmoidNew(nn.Module): """ sigmoid version focal loss """ def __init__(self, alpha=0.25, gamma=2, size_average=False): super(FocalLossSigmoidNew, self).__init__() self.alpha = alpha self.gamma = gamma self.size_average = size_average def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
No43problem/SSD_Pytorch
FocalLossSigmoid
false
14,100
[ "MIT" ]
163
ddc548824bffbc83b540a68b176ee0261b133ee0
https://github.com/No43problem/SSD_Pytorch/tree/ddc548824bffbc83b540a68b176ee0261b133ee0
FlowHead
import torch import torch.nn as nn class FlowHead(nn.Module): """ Applies two 2D convolutions over an input feature map to generate a flow tensor of shape N x 2 x H x W. Parameters ---------- input_dim : int, default: 128 Number of input dimensions. hidden_dim : int, default: 256 Number of hidden dimensions. """ def __init__(self, input_dim=128, hidden_dim=256): super(FlowHead, self).__init__() self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, x): """ Performs forward pass. Parameters ---------- x : torch.Tensor Input tensor of shape N x input_dim x H x W Returns ------- torch.Tensor A tensor of shape N x 2 x H x W """ return self.conv2(self.relu(self.conv1(x))) def get_inputs(): return [torch.rand([4, 128, 64, 64])] def get_init_inputs(): return [[], {}]
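A minimal usage sketch (an addition, not part of the record; assumes the FlowHead class above is in scope):

import torch

head = FlowHead(input_dim=128, hidden_dim=256)
features = torch.rand(1, 128, 32, 32)   # N x input_dim x H x W
flow = head(features)
print(flow.shape)                       # torch.Size([1, 2, 32, 32]): a 2-channel (u, v) flow field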
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 2 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_4, (2, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4194304)](buf1, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 64, 64), (8192, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(32768)](buf3, primals_5, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class FlowHeadNew(nn.Module): """ Applies two 2D convolutions over an input feature map to generate a flow tensor of shape N x 2 x H x W. Parameters ---------- input_dim : int, default: 128 Number of input dimensions. hidden_dim : int, default: 256 Number of hidden dimensions. """ def __init__(self, input_dim=128, hidden_dim=256): super(FlowHeadNew, self).__init__() self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1) self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
NeelayS/ezflow
FlowHead
false
14,101
[ "MIT" ]
94
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
https://github.com/NeelayS/ezflow/tree/b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
DC_layer
import torch import torch.nn as nn def Maxout(x1, x2, x3, x4): mask_1 = torch.ge(x1, x2) mask_1 = mask_1.float() x = mask_1 * x1 + (1 - mask_1) * x2 mask_2 = torch.ge(x, x3) mask_2 = mask_2.float() x = mask_2 * x + (1 - mask_2) * x3 mask_3 = torch.ge(x, x4) mask_3 = mask_3.float() x = mask_3 * x + (1 - mask_3) * x4 return x class DC_layer(nn.Module): def __init__(self, level, fuse=False): super(DC_layer, self).__init__() self.level = level self.conv1x1_d1 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d2 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d3 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d4 = nn.Conv2d(512, 512, kernel_size=1) self.conv_d1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1 ) self.conv_d2 = nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2 ) self.conv_d3 = nn.Conv2d(512, 512, kernel_size=3, padding=3, dilation=3 ) self.conv_d4 = nn.Conv2d(512, 512, kernel_size=3, padding=4, dilation=4 ) self.fuse = fuse if self.fuse: self.fuse = nn.Conv2d(512 * 2, 512, kernel_size=3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, x): x1 = self.conv1x1_d1(x) x2 = self.conv1x1_d2(x) x3 = self.conv1x1_d3(x) x4 = self.conv1x1_d4(x) x1 = self.conv_d1(x1) x2 = self.conv_d2(x2) x3 = self.conv_d3(x3) x4 = self.conv_d4(x4) x = Maxout(x1, x2, x3, x4) return x def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {'level': 4}]
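A minimal usage sketch (an addition, not part of the record; assumes DC_layer and Maxout above are in scope). The four dilated 3x3 branches all preserve the spatial size, and Maxout keeps the element-wise largest response, so the output shape matches the input:

import torch

layer = DC_layer(level=4)                 # the channel count is fixed at 512 inside the module
x = torch.rand(1, 512, 8, 8)
with torch.no_grad():                     # forward only; the layer holds roughly 10M parameters
    y = layer(x)
print(y.shape)                            # torch.Size([1, 512, 8, 8])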
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused__to_copy_add_convolution_ge_mul_rsub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr2, out_ptr3, out_ptr4, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4096 y3 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x1 + 512 * y0), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1 + 512 * y0), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x1 + 512 * y0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr6 + (x1 + 512 * y0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 >= tmp5 tmp7 = tmp6.to(tl.float32) tmp8 = tmp7 * tmp2 tmp9 = 1.0 tmp10 = tmp9 - tmp7 tmp11 = tmp10 * tmp5 tmp12 = tmp8 + tmp11 tmp15 = tmp13 + tmp14 tmp16 = tmp12 >= tmp15 tmp17 = tmp16.to(tl.float32) tmp18 = tmp17 * tmp12 tmp19 = tmp9 - tmp17 tmp20 = tmp19 * tmp15 tmp21 = tmp18 + tmp20 tmp24 = tmp22 + tmp23 tmp25 = tmp21 >= tmp24 tmp26 = tmp25.to(tl.float32) tmp27 = tmp26 * tmp21 
tmp28 = tmp9 - tmp26 tmp29 = tmp28 * tmp24 tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x1 + 512 * y0), tmp6, xmask) tl.store(out_ptr2 + (x1 + 512 * y0), tmp16, xmask) tl.store(out_ptr3 + (x1 + 512 * y0), tmp25, xmask) tl.store(out_ptr4 + (y2 + 4096 * x1 + 2097152 * y3), tmp30, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_4, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_10, buf1, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf2 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_12, buf2, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf3 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_14, buf3, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf4 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_16, buf4, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf6 = buf5 del buf5 triton_poi_fused_convolution_2[grid(8388608)](buf6, primals_2, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(8388608)](buf8, primals_5, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 512, 64, 64), 
(2097152, 1, 32768, 512)) buf10 = buf9 del buf9 triton_poi_fused_convolution_2[grid(8388608)](buf10, primals_7, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf0, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf12 = buf11 del buf11 triton_poi_fused_convolution_2[grid(8388608)](buf12, primals_9, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf13 = extern_kernels.convolution(buf6, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf14 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf15 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(3, 3), dilation=(3, 3), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf16 = extern_kernels.convolution(buf12, buf4, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf17 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.bool) buf19 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.bool) buf20 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.bool) buf21 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused__to_copy_add_convolution_ge_mul_rsub_3[grid(16384, 512)](buf13, primals_11, buf14, primals_13, buf15, primals_15, buf16, primals_17, buf17, buf19, buf20, buf21, 16384, 512, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf13 del buf14 del buf15 del buf16 del primals_11 del primals_13 del primals_15 del primals_17 return (buf21, primals_1, buf0, primals_4, primals_6, primals_8, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf17, buf19, buf20) def Maxout(x1, x2, x3, x4): mask_1 = torch.ge(x1, x2) mask_1 = mask_1.float() x = mask_1 * x1 + (1 - mask_1) * x2 mask_2 = torch.ge(x, x3) mask_2 = mask_2.float() x = mask_2 * x + (1 - mask_2) * x3 mask_3 = torch.ge(x, x4) mask_3 = mask_3.float() x = mask_3 * x + (1 - mask_3) * x4 return x class DC_layerNew(nn.Module): def __init__(self, level, fuse=False): super(DC_layerNew, self).__init__() self.level = level self.conv1x1_d1 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d2 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d3 = nn.Conv2d(512, 512, kernel_size=1) self.conv1x1_d4 = nn.Conv2d(512, 512, kernel_size=1) self.conv_d1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1 ) self.conv_d2 = nn.Conv2d(512, 512, kernel_size=3, padding=2, dilation=2 ) self.conv_d3 = nn.Conv2d(512, 512, kernel_size=3, padding=3, dilation=3 ) self.conv_d4 = nn.Conv2d(512, 512, kernel_size=3, padding=4, dilation=4 ) self.fuse = fuse if self.fuse: self.fuse = nn.Conv2d(512 * 2, 512, kernel_size=3, padding=1) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1x1_d1.weight primals_2 = self.conv1x1_d1.bias primals_4 = self.conv1x1_d2.weight primals_5 = self.conv1x1_d2.bias primals_6 = self.conv1x1_d3.weight primals_7 = 
self.conv1x1_d3.bias primals_8 = self.conv1x1_d4.weight primals_9 = self.conv1x1_d4.bias primals_10 = self.conv_d1.weight primals_11 = self.conv_d1.bias primals_12 = self.conv_d2.weight primals_13 = self.conv_d2.bias primals_14 = self.conv_d3.weight primals_15 = self.conv_d3.bias primals_16 = self.conv_d4.weight primals_17 = self.conv_d4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
Min-Sheng/Local-Crowd-Counting
DC_layer
false
14,103
[ "MIT" ]
75
388343d3ec2d08747d537437e4c880fd0047df83
https://github.com/Min-Sheng/Local-Crowd-Counting/tree/388343d3ec2d08747d537437e4c880fd0047df83
ChannelAttentionModule
import torch import numpy as np from torch import nn from torch.nn import init class SimplifiedScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttention, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class ChannelAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = SimplifiedScaledDotProductAttention(H * W, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1) y = self.pa(y, y, y) return y def get_inputs(): return [torch.rand([4, 512, 1, 49])] def get_init_inputs(): return [[], {}]
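A minimal usage sketch (an addition, not part of the record; assumes the two classes above are in scope). The module attends across the 512 channels, treating the flattened H*W = 49 positions as the feature dimension of the attention:

import torch

cam = ChannelAttentionModule(d_model=512, kernel_size=3, H=7, W=7)
x = torch.rand(2, 512, 7, 7)
y = cam(x)
print(y.shape)        # torch.Size([2, 512, 49]): the spatial dims stay flattened on the way out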
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused__softmax_sqrt_3(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.full([1], 7.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp8, 0)) tmp11 = tmp7 - tmp10 tmp12 = tmp6.to(tl.float64) tmp13 = tmp12 * tmp1 tmp14 = tmp13.to(tl.float32) tmp15 = tmp11 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp16 / tmp19 tl.store(out_ptr2 + (r1 + 512 * x0), tmp20, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 512, 1, 49), (25088, 49, 49, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (49, 49), (49, 1)) assert_size_stride(primals_5, (49,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 1, 49), (25088, 1, 25088, 512), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 49)](primals_1, buf0, 2048, 49, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_2, buf1, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 1, 49), (25088, 1, 25088, 512)) buf3 = buf2 del buf2 triton_poi_fused_convolution_2[grid(100352)](buf3, primals_3, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 512, 512), (262144, 512, 1), torch. float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 512, 49), (25088, 1, 512), 0), reinterpret_tensor(buf3, (4, 49, 512), (25088, 512, 1 ), 0), out=buf4) buf7 = empty_strided_cuda((4, 1, 512, 512), (262144, 1, 512, 1), torch.float32) triton_per_fused__softmax_sqrt_3[grid(2048)](buf4, buf7, 2048, 512, num_warps=4, num_stages=1) del buf4 buf8 = empty_strided_cuda((4, 512, 49), (25088, 49, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (4, 512, 512), (262144, 512, 1), 0), reinterpret_tensor(buf3, (4, 512, 49), (25088, 1, 512), 0), out=buf8) buf9 = empty_strided_cuda((2048, 49), (49, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (2048, 49), (49, 1), 0), reinterpret_tensor(primals_4, (49, 49), (1, 49), 0 ), alpha=1, beta=1, out=buf9) del primals_5 return reinterpret_tensor(buf9, (4, 512, 49), (25088, 49, 1), 0 ), buf0, buf1, buf3, buf7, reinterpret_tensor(buf8, (2048, 49), (49, 1), 0), primals_4 class SimplifiedScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttention, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). 
:return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class ChannelAttentionModuleNew(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = SimplifiedScaledDotProductAttention(H * W, h=1) def forward(self, input_0): primals_2 = self.cnn.weight primals_3 = self.cnn.bias primals_4 = self.pa.fc_o.weight primals_5 = self.pa.fc_o.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Nitin-Mane/External-Attention-pytorch
ChannelAttentionModule
false
14,104
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
ExternalAttention
import torch from torch import nn from torch.nn import init class ExternalAttention(nn.Module): def __init__(self, d_model, S=64): super().__init__() self.mk = nn.Linear(d_model, S, bias=False) self.mv = nn.Linear(S, d_model, bias=False) self.softmax = nn.Softmax(dim=1) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries): attn = self.mk(queries) attn = self.softmax(attn) attn = attn / torch.sum(attn, dim=2, keepdim=True) out = self.mv(attn) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
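A minimal usage sketch (an addition, not part of the record; assumes the ExternalAttention class above is in scope). The memories mk/mv act on the last dimension, and the softmax over dim 1 followed by re-normalization over dim 2 matches a (batch, tokens, d_model) layout:

import torch

ea = ExternalAttention(d_model=4, S=64)
queries = torch.rand(2, 10, 4)       # (batch, n_tokens, d_model)
out = ea(queries)
print(out.shape)                     # torch.Size([2, 10, 4])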
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 1024 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (256 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (512 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (768 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 1024 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (256 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (512 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (768 + x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + (x0 + 256 * x2), None, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(4096)](buf0, buf1, 4096, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch. float32) triton_poi_fused__softmax_1[grid(4096)](buf1, buf2, 4096, XBLOCK= 128, num_warps=4, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_div_sum_2[grid(4096)](buf2, buf3, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_3, (64, 4), (1, 64), 0), out=buf4) return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf3, primals_3 class ExternalAttentionNew(nn.Module): def __init__(self, d_model, S=64): super().__init__() self.mk = nn.Linear(d_model, S, bias=False) self.mv = nn.Linear(S, d_model, bias=False) self.softmax = nn.Softmax(dim=1) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0): primals_1 = self.mk.weight primals_3 = self.mv.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Nitin-Mane/External-Attention-pytorch
ExternalAttention
false
14,105
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
GAT
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] f_1 = h @ self.a1 f_2 = h @ self.a2 e = self.leakyrelu(f_1 + f_2.transpose(0, 1)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.sigmoid(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
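A minimal usage sketch (an addition, not part of the record; assumes the GAT and GraphAttentionLayer classes above are in scope). Each of the nheads attention heads emits nhid features per node, and the heads are concatenated:

import torch

gat = GAT(nfeat=4, nhid=4, dropout=0.5, alpha=4, nheads=4)
x = torch.rand(4, 4)                 # node features, (num_nodes, nfeat)
adj = torch.ones(4, 4)               # dense adjacency; entries > 0 are treated as edges
out = gat(x, adj)
print(out.shape)                     # torch.Size([4, 16]) == (num_nodes, nheads * nhid)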
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp39 = tl.load(in_ptr5 + x0, xmask) tmp40 = tl.load(in_ptr6 + 0) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp47 = tl.load(in_ptr6 + 1) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp55 = tl.load(in_ptr6 + 2) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp63 = tl.load(in_ptr6 + 3) tmp64 = tl.broadcast_to(tmp63, [XBLOCK]) tmp70 = tl.load(in_ptr7 + 4 * x0, xmask, 
eviction_policy='evict_last').to( tl.int1) tmp71 = tl.load(in_ptr8 + x0, xmask) tmp72 = tl.load(in_ptr9 + 0) tmp73 = tl.broadcast_to(tmp72, [XBLOCK]) tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp79 = tl.load(in_ptr9 + 1) tmp80 = tl.broadcast_to(tmp79, [XBLOCK]) tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp87 = tl.load(in_ptr9 + 2) tmp88 = tl.broadcast_to(tmp87, [XBLOCK]) tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp95 = tl.load(in_ptr9 + 3) tmp96 = tl.broadcast_to(tmp95, [XBLOCK]) tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last' ).to(tl.int1) tmp103 = tl.load(in_ptr11 + x0, xmask) tmp104 = tl.load(in_ptr12 + 0) tmp105 = tl.broadcast_to(tmp104, [XBLOCK]) tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp111 = tl.load(in_ptr12 + 1) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp119 = tl.load(in_ptr12 + 2) tmp120 = tl.broadcast_to(tmp119, [XBLOCK]) tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp127 = tl.load(in_ptr12 + 3) tmp128 = tl.broadcast_to(tmp127, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp42 = tmp39 + tmp41 tmp43 = tmp42 * tmp6 tmp44 = tl.where(tmp38, tmp42, tmp43) tmp45 = tl.where(tmp0, tmp44, tmp9) tmp49 = tmp39 + tmp48 tmp50 = tmp49 * tmp6 tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tl.where(tmp11, tmp51, tmp9) tmp53 = triton_helpers.maximum(tmp45, tmp52) tmp57 = tmp39 + tmp56 tmp58 = tmp57 * tmp6 tmp59 = tl.where(tmp54, tmp57, tmp58) tmp60 = tl.where(tmp20, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp53, tmp60) tmp65 = tmp39 + tmp64 tmp66 = tmp65 * tmp6 tmp67 = tl.where(tmp62, tmp65, tmp66) tmp68 = tl.where(tmp29, tmp67, tmp9) tmp69 = triton_helpers.maximum(tmp61, tmp68) tmp74 = tmp71 + tmp73 tmp75 = tmp74 * tmp6 tmp76 = tl.where(tmp70, tmp74, tmp75) tmp77 = tl.where(tmp0, tmp76, tmp9) tmp81 = tmp71 + tmp80 tmp82 = tmp81 * tmp6 tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tl.where(tmp11, tmp83, tmp9) tmp85 = triton_helpers.maximum(tmp77, tmp84) tmp89 = tmp71 + tmp88 tmp90 = tmp89 * tmp6 tmp91 = tl.where(tmp86, tmp89, tmp90) tmp92 = tl.where(tmp20, tmp91, tmp9) tmp93 = triton_helpers.maximum(tmp85, tmp92) tmp97 = tmp71 + tmp96 tmp98 = tmp97 * tmp6 tmp99 = tl.where(tmp94, tmp97, tmp98) tmp100 = tl.where(tmp29, tmp99, tmp9) tmp101 = triton_helpers.maximum(tmp93, tmp100) tmp106 = tmp103 + tmp105 tmp107 = tmp106 * tmp6 tmp108 = tl.where(tmp102, tmp106, tmp107) tmp109 = tl.where(tmp0, tmp108, tmp9) tmp113 = tmp103 + tmp112 tmp114 = tmp113 * tmp6 tmp115 = tl.where(tmp110, tmp113, tmp114) tmp116 = tl.where(tmp11, tmp115, tmp9) tmp117 = triton_helpers.maximum(tmp109, tmp116) tmp121 = tmp103 + tmp120 tmp122 = tmp121 * tmp6 tmp123 = tl.where(tmp118, 
tmp121, tmp122) tmp124 = tl.where(tmp20, tmp123, tmp9) tmp125 = triton_helpers.maximum(tmp117, tmp124) tmp129 = tmp103 + tmp128 tmp130 = tmp129 * tmp6 tmp131 = tl.where(tmp126, tmp129, tmp130) tmp132 = tl.where(tmp29, tmp131, tmp9) tmp133 = triton_helpers.maximum(tmp125, tmp132) tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp69, xmask) tl.store(out_ptr2 + x0, tmp101, xmask) tl.store(out_ptr3 + x0, tmp133, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tmp16 * tmp5 tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tl.where(tmp0, tmp18, tmp8) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp26 = tmp24 + tmp25 tmp27 = tmp26 * tmp5 tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tl.where(tmp0, tmp28, tmp8) tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp36 = tmp34 + tmp35 tmp37 = tmp36 * tmp5 tmp38 = tl.where(tmp33, tmp36, tmp37) tmp39 = tl.where(tmp0, tmp38, tmp8) tmp41 = tmp39 - tmp40 tmp42 = tl_math.exp(tmp41) tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) tl.store(out_ptr2 + x2, tmp32, xmask) tl.store(out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, 
out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 8, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.sigmoid(tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp12, tmp14, tmp15) tmp17 = tmp0 >= tmp10 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.sigmoid(tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp20, tmp22, tmp23) tmp25 = tmp0 >= tmp18 tl.full([1], 16, tl.int64) tmp28 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.sigmoid(tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp25, tmp29, tmp30) tmp32 = tl.where(tmp20, tmp24, tmp31) tmp33 = tl.where(tmp12, tmp16, tmp32) tmp34 = tl.where(tmp4, tmp8, tmp33) tl.store(out_ptr0 + x2, tmp34, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 1), (1, 1)) assert_size_stride(primals_8, (4, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 1), (1, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_6, out=buf9) del primals_6 buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_7, out=buf10) buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_8, out=buf11) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_9, out=buf17) del primals_9 buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_10, out=buf18) buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_11, out=buf19) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_12, out=buf25) del primals_12 buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_13, out=buf26) buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_14, out=buf27) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf10 del buf11 del buf13 del buf18 del buf19 del buf2 del buf21 del buf26 del buf27 del buf29 del buf5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.mm(buf7, buf0, out=buf8) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf14 del buf14 extern_kernels.mm(buf15, buf9, out=buf16) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = buf22 del buf22 extern_kernels.mm(buf23, buf17, out=buf24) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16, num_warps=1, num_stages=1) buf32 = buf30 del buf30 extern_kernels.mm(buf31, buf25, out=buf32) buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) return (buf33, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(primals_14, (1, 4), (1, 1), 0), reinterpret_tensor(primals_13, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor( primals_11, (1, 4), (1, 1), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 
0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] f_1 = h @ self.a1 f_2 = h @ self.a2 e = self.leakyrelu(f_1 + f_2.transpose(0, 1)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.sigmoid(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_1 = self.attention_0.W primals_3 = self.attention_0.a1 primals_4 = self.attention_0.a2 primals_2 = self.attention_1.W primals_7 = self.attention_1.a1 primals_8 = self.attention_1.a2 primals_5 = self.attention_2.W primals_10 = self.attention_2.a1 primals_11 = self.attention_2.a2 primals_6 = self.attention_3.W primals_13 = self.attention_3.a1 primals_14 = self.attention_3.a2 primals_9 = input_0 primals_12 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
EagleW/PaperRobot-Incremental-Draft-Generation-of-Scientific-Ideas
GAT
false
14,106
[ "MIT" ]
453
a338abf3974ba9ce916ae846835063a42b9e6689
https://github.com/EagleW/PaperRobot-Incremental-Draft-Generation-of-Scientific-Ideas/tree/a338abf3974ba9ce916ae846835063a42b9e6689
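A minimal usage sketch for the record above (not part of the dataset row): it runs the eager `GAT` and the Triton-compiled `GATNew` on the record's own sample inputs and prints whether they agree. It assumes the classes and the `get_inputs`/`get_init_inputs` helpers from the two code fields above are in scope, that a CUDA device is available, and that the tolerance is an arbitrary editorial choice.

```python
import torch

# Sketch only: compare eager GAT vs. Triton-compiled GATNew on the record's sample data.
torch.manual_seed(0)
_, kwargs = get_init_inputs()             # {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}
x, adj = [t.cuda() for t in get_inputs()]

ref = GAT(**kwargs).cuda().eval()         # eval() disables dropout so the comparison is deterministic
opt = GATNew(**kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())     # share the per-head W / a1 / a2 parameters

with torch.no_grad():
    print(torch.allclose(ref(x, adj), opt(x, adj), atol=1e-5))
```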
DoubleAttention
import torch from torch import nn from torch.nn import functional as F from torch.nn import init class DoubleAttention(nn.Module): def __init__(self, in_channels, c_m, c_n, reconstruct=True): super().__init__() self.in_channels = in_channels self.reconstruct = reconstruct self.c_m = c_m self.c_n = c_n self.convA = nn.Conv2d(in_channels, c_m, 1) self.convB = nn.Conv2d(in_channels, c_n, 1) self.convV = nn.Conv2d(in_channels, c_n, 1) if self.reconstruct: self.conv_reconstruct = nn.Conv2d(c_m, in_channels, kernel_size=1) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, x): b, c, h, w = x.shape assert c == self.in_channels A = self.convA(x) B = self.convB(x) V = self.convV(x) tmpA = A.view(b, self.c_m, -1) attention_maps = F.softmax(B.view(b, self.c_n, -1)) attention_vectors = F.softmax(V.view(b, self.c_n, -1)) global_descriptors = torch.bmm(tmpA, attention_maps.permute(0, 2, 1)) tmpZ = global_descriptors.matmul(attention_vectors) tmpZ = tmpZ.view(b, self.c_m, h, w) if self.reconstruct: tmpZ = self.conv_reconstruct(tmpZ) return tmpZ def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'c_m': 4, 'c_n': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x2), xmask) tmp6 = tl.load(in_ptr0 + (128 + x2), xmask) tmp9 = tl.load(in_ptr0 + (192 + x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x2, tmp11, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x4 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 
16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf3, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf4 = empty_strided_cuda((1, 4, 16), (64, 16, 1), torch.float32) buf5 = empty_strided_cuda((1, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf1, primals_5, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf6, primals_5, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf7 = buf5 del buf5 buf8 = buf4 del buf4 triton_poi_fused__softmax_1[grid(64)](buf2, primals_7, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf9, primals_7, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del primals_7 buf10 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0) del buf8 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0), out=buf10 ) buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf10, buf9, out=buf11) buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_0[grid(256)](buf13, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return (buf13, primals_1, primals_2, primals_4, primals_6, primals_8, buf6, buf9, reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf10, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0)) class DoubleAttentionNew(nn.Module): def __init__(self, in_channels, c_m, c_n, reconstruct=True): super().__init__() self.in_channels = in_channels self.reconstruct = reconstruct self.c_m = c_m self.c_n = c_n self.convA = nn.Conv2d(in_channels, c_m, 1) self.convB = nn.Conv2d(in_channels, c_n, 1) self.convV = nn.Conv2d(in_channels, c_n, 1) if self.reconstruct: self.conv_reconstruct = nn.Conv2d(c_m, in_channels, kernel_size=1) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0): primals_2 = self.convA.weight primals_3 = self.convA.bias primals_4 = self.convB.weight primals_5 = self.convB.bias primals_6 = self.convV.weight primals_7 = self.convV.bias primals_8 = self.conv_reconstruct.weight primals_9 = self.conv_reconstruct.bias primals_1 = input_0 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Nitin-Mane/External-Attention-pytorch
DoubleAttention
false
14,107
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
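A small sketch of how the generated `call()` in the record above is fed (not part of the dataset row): it packs the input and the four convolutions' weights and biases in the same primals order that `DoubleAttentionNew.forward` uses. It assumes the record's optimized code (and therefore its `call()`) is the definition in scope and that a CUDA device is present.

```python
import torch

# Sketch only: invoke the compiled graph directly with the module's own parameters.
m = DoubleAttention(in_channels=4, c_m=4, c_n=4).cuda()
x = get_inputs()[0].cuda()

primals = [x,
           m.convA.weight, m.convA.bias,
           m.convB.weight, m.convB.bias,
           m.convV.weight, m.convV.bias,
           m.conv_reconstruct.weight, m.conv_reconstruct.bias]
out = call(primals)[0]      # note: call() clears the list it receives
print(out.shape)            # (4, 4, 4, 4) for the record's sample input
```

This mirrors what `DoubleAttentionNew.forward` does internally; passing parameters positionally like this is only safe because the `assert_size_stride` checks at the top of `call()` pin down the expected shapes.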
SineGen
import torch import numpy as np import torch.utils.data import torch.nn as torch_nn class SineGen(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std= 0.003, voiced_threshold=0, flag_for_pulse=False): super(SineGen, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ rad_values = f0_values / self.sampling_rate % 1 rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini if not self.flag_for_pulse: tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, : ] < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1 ) * 2 * np.pi) else: uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) tmp_cumsum = torch.cumsum(rad_values, dim=1) for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, f0): """ sine_tensor, uv = forward(f0) input F0: tensor(batchsize=1, length, dim=1) f0 for unvoiced steps should be 0 output sine_tensor: tensor(batchsize=1, length, dim) output uv: tensor(batchsize=1, length, 1) """ with torch.no_grad(): f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device =f0.device) f0_buf[:, :, 0] = f0[:, :, 0] for idx in np.arange(self.harmonic_num): f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) sine_waves = self._f02sine(f0_buf) * self.sine_amp uv = self._f02uv(f0) noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 noise = noise_amp * torch.randn_like(sine_waves) sine_waves = sine_waves * uv + noise return sine_waves, uv, noise def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'samp_rate': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.utils.data import torch.nn as torch_nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_copy_cumsum_div_fill_lift_fresh_remainder_zeros_0( in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp4 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (4 * r1 + 16 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp0 = r1 tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tmp8 % tmp9 tmp11 = tmp10 != tmp1 tmp12 = libdevice.signbit(tmp10 ) if tmp10.dtype is tl.float32 else tmp10 < 0 tmp13 = libdevice.signbit(tmp9) if tmp9.dtype is tl.float32 else tmp9 < 0 tmp14 = tmp12 != tmp13 tmp15 = tmp11 & tmp14 tmp16 = tmp10 + tmp9 tmp17 = tl.where(tmp15, tmp16, tmp10) tmp19 = tl.where(tmp3, tmp5, tmp18) tmp20 = tmp17 + tmp19 tmp22 = tl.where(tmp3, tmp21, tmp5) tmp23 = tmp22 * tmp7 tmp24 = tmp23 % tmp9 tmp25 = tmp24 != tmp1 tmp26 = libdevice.signbit(tmp24 ) if tmp24.dtype is tl.float32 else tmp24 < 0 tmp27 = tmp26 != tmp13 tmp28 = tmp25 & tmp27 tmp29 = tmp24 + tmp9 tmp30 = tl.where(tmp28, tmp29, tmp24) tmp31 = tl.where(tmp2, tmp20, tmp30) tmp32 = tmp31.to(tl.float32) tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp34, = tl.associative_scan((tmp33,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp34, xmask) @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_copy_cumsum_div_fill_lift_fresh_lt_mul_remainder_sub_zeros_zeros_like_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp4 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (4 * r1 + 16 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp0 = r1 tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tmp8 % tmp9 tmp11 = tmp10 != tmp1 tmp12 = libdevice.signbit(tmp10 ) if tmp10.dtype is tl.float32 else tmp10 < 0 tmp13 = libdevice.signbit(tmp9) if tmp9.dtype is tl.float32 else tmp9 < 0 tmp14 = tmp12 != tmp13 tmp15 = tmp11 & tmp14 tmp16 = tmp10 + tmp9 tmp17 = tl.where(tmp15, tmp16, tmp10) tmp19 = tl.where(tmp3, tmp5, tmp18) tmp20 = tmp17 + tmp19 tmp22 = tl.where(tmp3, tmp21, tmp5) tmp23 = tmp22 * tmp7 tmp24 = tmp23 % tmp9 tmp25 = tmp24 != tmp1 tmp26 = libdevice.signbit(tmp24 ) if tmp24.dtype is tl.float32 else tmp24 < 0 tmp27 = tmp26 != tmp13 tmp28 = tmp25 & tmp27 tmp29 = tmp24 + tmp9 tmp30 = tl.where(tmp28, tmp29, tmp24) tmp31 = tl.where(tmp2, tmp20, tmp30) tmp32 = tl.full([1, 1], 1, tl.int64) tmp33 = tmp0 >= tmp32 tmp34 = tl.load(in_ptr2 + (r1 + 4 * x0), tmp33 & xmask, other=0.0) tmp35 = tmp34 % tmp9 tmp36 = tmp35 != tmp1 tmp37 = libdevice.signbit(tmp35 ) if tmp35.dtype is tl.float32 else tmp35 < 0 tmp38 = tmp37 != tmp13 tmp39 = tmp36 & tmp38 tmp40 = tmp35 + tmp9 tmp41 = tl.where(tmp39, tmp40, tmp35) tmp42 = tl.load(in_ptr2 + (-1 + r1 + 4 * x0), tmp33 & xmask, other=0.0) tmp43 = tmp42 % tmp9 tmp44 = tmp43 != tmp1 tmp45 = libdevice.signbit(tmp43 ) if tmp43.dtype is tl.float32 else tmp43 < 0 tmp46 = tmp45 != tmp13 tmp47 = tmp44 & tmp46 tmp48 = tmp43 + tmp9 tmp49 = tl.where(tmp47, tmp48, tmp43) tmp50 = tmp41 - tmp49 tmp51 = tmp50 < tmp5 tmp52 = tmp51.to(tl.float32) tmp53 = -1.0 tmp54 = tmp52 * tmp53 tmp55 = tl.full(tmp54.shape, 0.0, tmp54.dtype) tmp56 = tl.where(tmp33, tmp54, tmp55) tmp57 = tl.where(tmp33, tmp56, tmp5) tmp58 = tmp31 + tmp57 tmp59 = tmp58.to(tl.float32) tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK]) tmp61, = tl.associative_scan((tmp60,), 1, _triton_helper_fn_add0) tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp61, xmask) @triton.jit def triton_poi_fused_add_div_gt_mul_rsub_sin_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x2 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask) tmp13 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = 0.003 tmp5 = tmp3 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp3 tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = 0.3333333333333333 tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp14 = tmp12 * tmp13 tmp16 = 2.0 tmp17 = tmp15 * tmp16 tmp18 = 3.141592653589793 tmp19 = tmp17 * tmp18 tmp20 = tl_math.sin(tmp19) tmp21 = tmp20 * tmp8 tmp22 = tmp21 * tmp3 tmp23 = tmp22 + tmp14 tl.store(out_ptr0 + x0, tmp3, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) tl.store(out_ptr2 + x0, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = torch.ops.aten.rand.default([4, 1], device=device(type= 'cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_per_fused_add_copy_cumsum_div_fill_lift_fresh_remainder_zeros_0[ grid(4)](arg0_1, buf1, buf2, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_copy_cumsum_div_fill_lift_fresh_lt_mul_remainder_sub_zeros_zeros_like_1[ grid(4)](buf4, arg0_1, buf1, buf2, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 buf6 = torch.ops.aten.randn.default([4, 4, 1], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf7 = buf6 del buf6 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_gt_mul_rsub_sin_2[grid(64)](arg0_1, buf7, buf4, buf5, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf4 del buf7 return buf9, buf5, buf8 class SineGenNew(torch_nn.Module): """ Definition of sine generator SineGen(samp_rate, harmonic_num = 0, sine_amp = 0.1, noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False) samp_rate: sampling rate in Hz harmonic_num: number of harmonic overtones (default 0) sine_amp: amplitude of sine-wavefrom (default 0.1) noise_std: std of Gaussian noise (default 0.003) voiced_thoreshold: F0 threshold for U/V classification (default 0) flag_for_pulse: this SinGen is used inside PulseGen (default False) Note: when flag_for_pulse is True, the first time step of a voiced segment is always sin(np.pi) or cos(0) """ def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std= 0.003, voiced_threshold=0, flag_for_pulse=False): super(SineGenNew, self).__init__() self.sine_amp = sine_amp self.noise_std = noise_std self.harmonic_num = harmonic_num self.dim = self.harmonic_num + 1 self.sampling_rate = samp_rate self.voiced_threshold = voiced_threshold self.flag_for_pulse = flag_for_pulse def _f02uv(self, f0): uv = torch.ones_like(f0) uv = uv * (f0 > self.voiced_threshold) return uv def _f02sine(self, f0_values): """ f0_values: (batchsize, length, dim) where dim indicates fundamental tone and overtones """ rad_values = f0_values / self.sampling_rate % 1 rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device) rand_ini[:, 0] = 0 rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini if not self.flag_for_pulse: tmp_over_one = torch.cumsum(rad_values, 1) % 1 tmp_over_one_idx = tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, : ] < 0 cumsum_shift = torch.zeros_like(rad_values) cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1 ) * 2 * np.pi) else: uv = self._f02uv(f0_values) uv_1 = torch.roll(uv, shifts=-1, dims=1) uv_1[:, -1, :] = 1 u_loc = (uv < 1) * (uv_1 > 0) tmp_cumsum = torch.cumsum(rad_values, dim=1) for idx in range(f0_values.shape[0]): temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] tmp_cumsum[idx, :, :] = 0 tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) sines = torch.cos(i_phase * 2 * np.pi) return sines def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1], output[2]
Ninushkat/Impact-Synth-Hardware
SineGen
false
14,108
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
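A quick smoke test for the record above (not part of the dataset row). `SineGen.forward` draws a random initial phase and Gaussian noise internally, so exact value comparison between the eager and compiled variants is not meaningful; this sketch only exercises both and compares output shapes. It assumes the record's code is in scope and CUDA is available.

```python
import torch

# Sketch only: SineGen is stochastic, so compare shapes rather than values.
f0 = get_inputs()[0].cuda()
_, kwargs = get_init_inputs()        # {'samp_rate': 4}

sine_r, uv_r, noise_r = SineGen(**kwargs)(f0)
sine_o, uv_o, noise_o = SineGenNew(**kwargs)(f0)
print(sine_r.shape == sine_o.shape,
      uv_r.shape == uv_o.shape,
      noise_r.shape == noise_o.shape)
```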
SpatialGroupEnhance
import torch from torch import nn from torch.nn import init class SpatialGroupEnhance(nn.Module): def __init__(self, groups): super().__init__() self.groups = groups self.avg_pool = nn.AdaptiveAvgPool2d(1) self.weight = nn.Parameter(torch.zeros(1, groups, 1, 1)) self.bias = nn.Parameter(torch.zeros(1, groups, 1, 1)) self.sig = nn.Sigmoid() self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, x): b, c, h, w = x.shape x = x.view(b * self.groups, -1, h, w) xn = x * self.avg_pool(x) xn = xn.sum(dim=1, keepdim=True) t = xn.view(b * self.groups, -1) t = t - t.mean(dim=1, keepdim=True) std = t.std(dim=1, keepdim=True) + 1e-05 t = t / std t = t.view(b, self.groups, h, w) t = t * self.weight + self.bias t = t.view(b * self.groups, 1, h, w) x = x * self.sig(t) x = x.view(b, c, h, w) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_per_fused_add_mean_mul_sigmoid_std_sub_sum_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp44 = tl.load(in_ptr2 + 0) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.load(in_ptr3 + 0) tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = tmp14 - tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tl.where(xmask, tmp22, 0) tmp25 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.full([XBLOCK, 1], 16, tl.int32) tmp30 = tmp29.to(tl.float32) tmp31 = tmp28 / tmp30 tmp32 = tmp22 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.where(xmask, tmp34, 0) tmp37 = tl.sum(tmp36, 1)[:, None] tmp38 = 15.0 tmp39 = tmp37 / tmp38 tmp40 = libdevice.sqrt(tmp39) tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = tmp21 / tmp42 tmp46 = tmp43 * tmp45 tmp49 = tmp46 + tmp48 tmp50 = tl.sigmoid(tmp49) tl.store(out_ptr0 + (r1 + 16 * x0), tmp14, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp42, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp50, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (1, 1, 1, 1), (1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf4 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf8 = reinterpret_tensor(buf6, (4, 1), (1, 1), 0) del buf6 buf9 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_per_fused_add_mean_mul_sigmoid_std_sub_sum_1[grid(4)](buf4, buf8, primals_1, buf1, primals_2, primals_3, buf2, buf9, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_2[grid(256)](primals_1, buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 return buf10, primals_1, primals_2, primals_3, buf1, buf4, buf8 class SpatialGroupEnhanceNew(nn.Module): def __init__(self, groups): super().__init__() self.groups = groups self.avg_pool = nn.AdaptiveAvgPool2d(1) self.weight = nn.Parameter(torch.zeros(1, groups, 1, 1)) self.bias = nn.Parameter(torch.zeros(1, groups, 1, 1)) self.sig = nn.Sigmoid() self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Nitin-Mane/External-Attention-pytorch
SpatialGroupEnhance
false
14,109
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
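A benchmarking harness sketch for the record above (not part of the dataset row), timing the eager module against the Triton-compiled one with CUDA events. The record's 4x4x4x4 sample input is far too small for meaningful throughput numbers, and the iteration count is arbitrary; the snippet only illustrates the measurement pattern and assumes the record's classes are in scope with CUDA available.

```python
import torch

# Sketch only: CUDA-event timing of eager vs. compiled SpatialGroupEnhance.
x = get_inputs()[0].cuda()
ref = SpatialGroupEnhance(groups=1).cuda()
opt = SpatialGroupEnhanceNew(groups=1).cuda()
opt.load_state_dict(ref.state_dict())

def bench(fn, iters=100):
    fn(x); torch.cuda.synchronize()                    # warm-up
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn(x)
    end.record(); torch.cuda.synchronize()
    return start.elapsed_time(end) / iters             # milliseconds per call

with torch.no_grad():
    print(bench(ref), bench(opt))
```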
SimplifiedScaledDotProductAttention
import torch import numpy as np from torch import nn from torch.nn import init class SimplifiedScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttention, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand( [4, 4, 4, 1])] def get_init_inputs(): return [[], {'d_model': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](primals_2, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](primals_3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf6 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) del buf4 buf7 = buf5 del buf5 triton_poi_fused_clone_0[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0) del buf6 extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_4 del primals_5 return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf7, (16, 4), (4, 1), 0) class SimplifiedScaledDotProductAttentionNew(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttentionNew, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0, input_1, input_2): primals_4 = self.fc_o.weight primals_5 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Nitin-Mane/External-Attention-pytorch
SimplifiedScaledDotProductAttention
false
14,110
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
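For the attention record above, dropout is the only stochastic part of the module, so with `eval()` the eager and compiled forward passes can be compared directly. The sketch below (not part of the dataset row) assumes the record's classes and helpers are in scope, CUDA is available, and the tolerance is an arbitrary choice.

```python
import torch

# Sketch only: eager vs. compiled attention with dropout disabled.
torch.manual_seed(0)
q, k, v = [t.cuda() for t in get_inputs()]
_, kwargs = get_init_inputs()             # {'d_model': 4, 'h': 4}

ref = SimplifiedScaledDotProductAttention(**kwargs).cuda().eval()
opt = SimplifiedScaledDotProductAttentionNew(**kwargs).cuda().eval()
opt.load_state_dict(ref.state_dict())     # share the fc_o projection

with torch.no_grad():
    print(torch.allclose(ref(q, k, v), opt(q, k, v), atol=1e-5))
```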
Conv1dKeepLength
import torch import torch.utils.data import torch.nn as torch_nn import torch.nn.functional as torch_nn_func class Conv1dKeepLength(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLength, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, data): x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), (self. pad_le, self.pad_ri, 0, 0), mode=self.pad_mode).squeeze(2) output = self.l_ac(super(Conv1dKeepLength, self).forward(x)) return output.permute(0, 2, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4, 'dilation_s': 1, 'kernel_s': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as torch_nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 7 x2 = xindex // 28 x3 = xindex % 28 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 28 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 7 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_tanh_tanh_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 7), (28, 1, 112, 4), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(112)](primals_1, buf0, 112, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) triton_poi_fused_convolution_1[grid(16, 7)](buf0, buf1, 16, 7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) del buf1 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_tanh_tanh_backward_2[grid(64)](buf3, primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), primals_2, reinterpret_tensor(buf0, (4, 4, 7), (28, 1, 4), 0), buf4 class Conv1dKeepLengthNew(torch_nn.Conv1d): """ Wrapper for causal convolution Input tensor: (batchsize=1, length, 
dim_in) Output tensor: (batchsize=1, length, dim_out) https://github.com/pytorch/pytorch/issues/1333 Note: Tanh is optional """ def __init__(self, input_dim, output_dim, dilation_s, kernel_s, causal= False, stride=1, groups=1, bias=True, tanh=True, pad_mode='constant'): super(Conv1dKeepLengthNew, self).__init__(input_dim, output_dim, kernel_s, stride=stride, padding=0, dilation=dilation_s, groups =groups, bias=bias) self.pad_mode = pad_mode self.causal = causal if self.causal: self.pad_le = dilation_s * (kernel_s - 1) self.pad_ri = 0 else: self.pad_le = dilation_s * (kernel_s - 1) // 2 self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le if tanh: self.l_ac = torch_nn.Tanh() else: self.l_ac = torch_nn.Identity() def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Ninushkat/Impact-Synth-Hardware
Conv1dKeepLength
false
14,111
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
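The Conv1dKeepLength listing above preserves sequence length because the padding (two-sided in the non-causal case, purely left-sided in the causal case) always totals dilation_s * (kernel_s - 1), exactly the extra length a dilated kernel consumes. A minimal eager-mode sketch of that property; the shapes and hyperparameters below are arbitrary illustrative choices, not taken from the listing's test harness:

import torch

conv = Conv1dKeepLength(input_dim=4, output_dim=8, dilation_s=2, kernel_s=3)
causal = Conv1dKeepLength(input_dim=4, output_dim=8, dilation_s=2, kernel_s=3, causal=True)

x = torch.rand(2, 10, 4)      # (batch, length, dim_in)
print(conv(x).shape)          # torch.Size([2, 10, 8]) -- length preserved
print(causal(x).shape)        # torch.Size([2, 10, 8]) -- all padding applied on the left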
ECAAttention
import torch from torch import nn from torch.nn import init class ECAAttention(nn.Module): def __init__(self, kernel_size=3): super().__init__() self.gap = nn.AdaptiveAvgPool2d(1) self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=( kernel_size - 1) // 2) self.sigmoid = nn.Sigmoid() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, x): y = self.gap(x) y = y.squeeze(-1).permute(0, 2, 1) y = self.conv(y) y = self.sigmoid(y) y = y.permute(0, 2, 1).unsqueeze(-1) return x * y.expand_as(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4 ), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16)](buf3, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), buf3 class ECAAttentionNew(nn.Module): def __init__(self, kernel_size=3): super().__init__() self.gap = nn.AdaptiveAvgPool2d(1) self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=( kernel_size - 1) // 2) self.sigmoid = nn.Sigmoid() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif 
isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Nitin-Mane/External-Attention-pytorch
ECAAttention
false
14,112
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
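Because ECAAttention derives its per-channel gate from a global average pool followed by a 1-D convolution across the channel axis, the eager module above is agnostic to the spatial resolution of its input. A small CPU-only sketch, with shapes chosen arbitrarily for illustration:

import torch

eca = ECAAttention(kernel_size=3)
for shape in [(2, 16, 8, 8), (2, 16, 32, 32)]:
    x = torch.rand(*shape)
    y = eca(x)
    assert y.shape == x.shape  # the (B, C, 1, 1) gate broadcasts over H and W
print('gate is per-channel, independent of spatial size')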
MlpBlock
import torch from torch import nn class MlpBlock(nn.Module): def __init__(self, input_dim, mlp_dim=512): super().__init__() self.fc1 = nn.Linear(input_dim, mlp_dim) self.gelu = nn.GELU() self.fc2 = nn.Linear(mlp_dim, input_dim) def forward(self, x): return self.fc2(self.gelu(self.fc1(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 512), (512, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(32768)](buf0, buf1, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 512), (512, 1), 0), primals_4 class MlpBlockNew(nn.Module): def __init__(self, input_dim, mlp_dim=512): super().__init__() self.fc1 = nn.Linear(input_dim, mlp_dim) self.gelu = nn.GELU() self.fc2 = nn.Linear(mlp_dim, input_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Nitin-Mane/External-Attention-pytorch
MlpBlock
false
14,113
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
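For MlpBlock, the generated call() above only fuses the GELU into a Triton kernel; both linears are dispatched to extern_kernels.addmm on flattened (64, N) views of the (4, 4, 4, 4) input. One plausible way to exercise the pair is a parity check against the eager module. This is a sketch only: it assumes a CUDA device is available and that both listings are importable under the names used below.

import torch

ref = MlpBlock(input_dim=4).cuda().eval()
opt = MlpBlockNew(input_dim=4).cuda().eval()
opt.load_state_dict(ref.state_dict())   # identical fc1/fc2 parameters in both modules

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y_ref, y_opt = ref(x), opt(x)
print((y_ref - y_opt).abs().max())       # expected to be near zero (floating-point tolerance)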
OutlookAttention
import math import torch from torch import nn from torch.nn import functional as F class OutlookAttention(nn.Module): def __init__(self, dim, num_heads=1, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0.1): super().__init__() self.dim = dim self.num_heads = num_heads self.head_dim = dim // num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = self.head_dim ** -0.5 self.v_pj = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(attn_drop) self.unflod = nn.Unfold(kernel_size, padding, stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, x): B, H, W, C = x.shape v = self.v_pj(x).permute(0, 3, 1, 2) h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) v = self.unflod(v).reshape(B, self.num_heads, self.head_dim, self. kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) attn = self.attn(attn).reshape(B, h * w, self.num_heads, self. kernel_size * self.kernel_size, self.kernel_size * self.kernel_size ).permute(0, 2, 1, 3, 4) attn = self.scale * attn attn = attn.softmax(-1) attn = self.attn_drop(attn) out = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self. kernel_size * self.kernel_size, h * w) out = F.fold(out, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) out = self.proj(out.permute(0, 2, 3, 1)) out = self.proj_drop(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_im2col_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 + x1 tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__softmax_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 576 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x5 = xindex x0 = xindex % 9 x4 = xindex // 144 x6 = xindex % 144 tmp0 = tl.load(in_ptr0 + (r2 + 9 * x5), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + 9 * x0), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(rmask & xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp12 / tmp16 tl.store(out_ptr2 + (r2 + 9 * x6 + 1312 * x4), tmp17, rmask & xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 9 x2 = xindex // 36 % 16 x0 = xindex % 4 x3 = xindex // 576 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 // 3) + x2 // 4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (4 * (x1 % 3) + x2 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 6, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 6) | ~xmask, 'index out of bounds: 0 <= tmp4 < 6') tmp7 = tmp6 + tmp1 tmp8 = tmp6 < 0 tmp9 = tl.where(tmp8, tmp7, tmp6) tl.device_assert((0 <= tmp9) & (tmp9 < 6) | ~xmask, 'index out of bounds: 0 <= tmp9 < 6') tmp11 = -1 + tmp4 tmp12 = tl.full([1], 0, tl.int64) tmp13 = tmp11 >= tmp12 tmp14 = tl.full([1], 4, tl.int64) tmp15 = tmp11 < tmp14 tmp16 = -1 + tmp9 tmp17 = tmp16 >= tmp12 tmp18 = tmp16 < tmp14 tmp19 = tmp13 & tmp15 tmp20 = tmp19 & tmp17 tmp21 = tmp20 & tmp18 tmp22 = tl.load(in_ptr1 + (-20 + x0 + 4 * tmp9 + 16 * tmp4 + 64 * x3), tmp21 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp22, xmask) @triton.jit def triton_poi_fused_bmm_4(in_ptr0, out_ptr0, 
xnumel, XBLOCK: tl.constexpr): xnumel = 5184 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 81 x1 = xindex // 81 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 81 * (x1 % 16) + 1312 * (x1 // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_col2im_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_col2im_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x7 = xindex // 48 % 12 x9 = xindex // 4 % 12 x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 3 x3 = xindex // 48 % 4 x4 = xindex // 192 % 3 x5 = xindex // 576 tmp0 = tl.load(in_ptr0 + x7, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + x9, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (x0 + 4 * x2 + 12 * x4 + 36 * x1 + 144 * x3 + 576 * x5 + (x2 + 3 * x4) // 9), xmask) tmp1 = tl.full([XBLOCK], 6, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 6) | ~xmask, 'index out of bounds: 0 <= tmp4 < 6') tmp7 = tmp6 + tmp1 tmp8 = tmp6 < 0 tmp9 = tl.where(tmp8, tmp7, tmp6) tl.device_assert((0 <= tmp9) & (tmp9 < 6) | ~xmask, 'index out of bounds: 0 <= tmp9 < 6') tl.atomic_add(out_ptr0 + (tmp9 + 6 * tmp4 + 36 * x0 + 144 * x5), tmp11, xmask, sem='relaxed') @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y1 = yindex // 4 % 4 y0 = yindex % 4 x3 = xindex y2 = yindex // 16 y5 = yindex tmp0 = 1 + y1 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 6, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 1 + y0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (7 + y0 + 6 * y1 + 36 * x3 + 144 * y2), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x3 + 4 * y5), tmp11, xmask & ymask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (81, 4), (4, 1)) assert_size_stride(primals_4, (81,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 
buf1 = empty_strided_cuda((3, 4), (4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_im2col_0[grid(12)](buf1, 12, XBLOCK=16, num_warps= 1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_avg_pool2d_1[grid(256)](primals_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 81), (81, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 81), (1, 4), 0), out=buf3) del primals_3 buf6 = empty_strided_cuda((4, 1, 16, 9, 9), (1312, 1312, 81, 9, 1), torch.float32) triton_per_fused__softmax_2[grid(576)](buf3, primals_4, buf6, 576, 9, XBLOCK=8, num_warps=2, num_stages=1) del primals_4 buf7 = empty_strided_cuda((4, 1, 16, 9, 4), (576, 1, 36, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(2304)](buf1, buf0, buf7, 2304, XBLOCK =256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf3, (64, 9, 9), (81, 9, 1), 0) del buf3 triton_poi_fused_bmm_4[grid(5184)](buf6, buf8, 5184, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((64, 9, 4), (36, 4, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf7, (64, 9, 4), (36, 4, 1), 0), out=buf9) del buf8 buf10 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32 ) triton_poi_fused_col2im_5[grid(576)](buf10, 576, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32 ) triton_poi_fused_col2im_5[grid(576)](buf11, 576, XBLOCK=256, num_warps=4, num_stages=1) triton_poi_fused_col2im_6[grid(2304)](buf1, buf9, buf11, 2304, XBLOCK=128, num_warps=4, num_stages=1) del buf9 buf13 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_7[grid(64, 4)](buf11, buf13, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf11 buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf14 triton_poi_fused_add_8[grid(256)](buf15, primals_6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf15, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf6, buf10, reinterpret_tensor(buf13, (64, 4), (4, 1), 0 ), primals_5, reinterpret_tensor(buf7, (64, 4, 9), (36, 1, 4), 0) class OutlookAttentionNew(nn.Module): def __init__(self, dim, num_heads=1, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0.1): super().__init__() self.dim = dim self.num_heads = num_heads self.head_dim = dim // num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = self.head_dim ** -0.5 self.v_pj = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(attn_drop) self.unflod = nn.Unfold(kernel_size, padding, stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, input_0): primals_2 = self.v_pj.weight primals_3 = self.attn.weight primals_4 = self.attn.bias primals_5 = self.proj.weight primals_6 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Nitin-Mane/External-Attention-pytorch
OutlookAttention
false
14,114
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
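In the OutlookAttention listing above, the attention weights are not formed from queries and keys: the attn linear layer maps each pooled location directly to kernel_size ** 4 values, i.e. one kernel_size^2 x kernel_size^2 matrix per spatial location, which is then scaled, softmaxed, and applied to the unfolded values. A short eager-mode sanity check of those shapes, illustrative only:

import torch

m = OutlookAttention(dim=4).eval()   # eval() turns the two Dropout layers into no-ops
x = torch.rand(4, 4, 4, 4)           # (B, H, W, C) with C == dim
print(m(x).shape)                    # torch.Size([4, 4, 4, 4]) -- same shape as the input
print(m.attn.out_features)           # 81 == 3 ** 4, one 9x9 attention matrix per location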
Encoder5
import torch import numpy as np import torch.nn as nn class Encoder5(nn.Module): def __init__(self, model=None, fixed=False): super(Encoder5, self).__init__() self.fixed = fixed self.conv0 = nn.Conv2d(3, 3, 1, 1, 0) self.conv0.weight = nn.Parameter(torch.from_numpy(np.array([[[[0]], [[0]], [[255]]], [[[0]], [[255]], [[0]]], [[[255]], [[0]], [[0] ]]])).float()) self.conv0.bias = nn.Parameter(torch.from_numpy(np.array([-103.939, -116.779, -123.68])).float()) self.conv11 = nn.Conv2d(3, 64, 3, 1, 0) self.conv12 = nn.Conv2d(64, 64, 3, 1, 0) self.conv21 = nn.Conv2d(64, 128, 3, 1, 0) self.conv22 = nn.Conv2d(128, 128, 3, 1, 0) self.conv31 = nn.Conv2d(128, 256, 3, 1, 0) self.conv32 = nn.Conv2d(256, 256, 3, 1, 0) self.conv33 = nn.Conv2d(256, 256, 3, 1, 0) self.conv34 = nn.Conv2d(256, 256, 3, 1, 0) self.conv41 = nn.Conv2d(256, 512, 3, 1, 0) self.conv42 = nn.Conv2d(512, 512, 3, 1, 0) self.conv43 = nn.Conv2d(512, 512, 3, 1, 0) self.conv44 = nn.Conv2d(512, 512, 3, 1, 0) self.conv51 = nn.Conv2d(512, 512, 3, 1, 0) self.relu = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=False) self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) if model: assert os.path.splitext(model)[1] in {'.t7', '.pth'} if model.endswith('.t7'): t7_model = load_lua(model) load_param(t7_model, 0, self.conv0) load_param(t7_model, 2, self.conv11) load_param(t7_model, 5, self.conv12) load_param(t7_model, 9, self.conv21) load_param(t7_model, 12, self.conv22) load_param(t7_model, 16, self.conv31) load_param(t7_model, 19, self.conv32) load_param(t7_model, 22, self.conv33) load_param(t7_model, 25, self.conv34) load_param(t7_model, 29, self.conv41) load_param(t7_model, 32, self.conv42) load_param(t7_model, 35, self.conv43) load_param(t7_model, 38, self.conv44) load_param(t7_model, 42, self.conv51) else: self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage)) if fixed: for param in self.parameters(): param.requires_grad = False def forward(self, input): y = self.conv0(input) y = self.relu(self.conv11(self.pad(y))) y = self.relu(self.conv12(self.pad(y))) y = self.pool(y) y = self.relu(self.conv21(self.pad(y))) y = self.relu(self.conv22(self.pad(y))) y = self.pool(y) y = self.relu(self.conv31(self.pad(y))) y = self.relu(self.conv32(self.pad(y))) y = self.relu(self.conv33(self.pad(y))) y = self.relu(self.conv34(self.pad(y))) y = self.pool(y) y = self.relu(self.conv41(self.pad(y))) y = self.relu(self.conv42(self.pad(y))) y = self.relu(self.conv43(self.pad(y))) y = self.relu(self.conv44(self.pad(y))) y = self.pool(y) y = self.relu(self.conv51(self.pad(y))) return y def forward_branch(self, input): out0 = self.conv0(input) out11 = self.relu(self.conv11(self.pad(out0))) out12 = self.relu(self.conv12(self.pad(out11))) out12 = self.pool(out12) out21 = self.relu(self.conv21(self.pad(out12))) out22 = self.relu(self.conv22(self.pad(out21))) out22 = self.pool(out22) out31 = self.relu(self.conv31(self.pad(out22))) out32 = self.relu(self.conv32(self.pad(out31))) out33 = self.relu(self.conv33(self.pad(out32))) out34 = self.relu(self.conv34(self.pad(out33))) out34 = self.pool(out34) out41 = self.relu(self.conv41(self.pad(out34))) out42 = self.relu(self.conv42(self.pad(out41))) out43 = self.relu(self.conv43(self.pad(out42))) out44 = self.relu(self.conv44(self.pad(out43))) out44 = self.pool(out44) out51 = self.relu(self.conv51(self.pad(out44))) return out11, out21, out31, out41, out51 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 52272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 % 66 x2 = xindex // 198 % 66 x3 = xindex // 13068 x4 = xindex tmp0 = tl.load(in_ptr0 + (12285 + x0 + -192 * tl_math.abs(-63 + tl_math .abs(-1 + x2)) + -3 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 12288 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 66 x2 = xindex // 4224 % 66 x3 = xindex // 278784 x4 = xindex tmp0 = tl.load(in_ptr0 + (262080 + x0 + -4096 * tl_math.abs(-63 + tl_math.abs(-1 + x2)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1 )) + 262144 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp7 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp12 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 34 x2 = xindex // 2176 % 34 x3 = xindex // 73984 x4 = xindex tmp0 = tl.load(in_ptr0 + (257920 + x0 + -8192 * tl_math.abs(-31 + tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 262144 * x3), xmask) tmp1 = tl.load(in_ptr0 + (257984 + x0 + -8192 * tl_math.abs(-31 + tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 262144 * x3), xmask) tmp3 = tl.load(in_ptr0 + (262016 + x0 + -8192 * tl_math.abs(-31 + tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 262144 * x3), xmask) tmp5 = tl.load(in_ptr0 + (262080 + x0 + -8192 * tl_math.abs(-31 + tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 262144 * x3), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 34 x2 = xindex // 4352 % 34 x3 = xindex // 147968 x4 = xindex tmp0 = tl.load(in_ptr0 + (130944 + x0 + -4096 * tl_math.abs(-31 + tl_math.abs(-1 + x2)) + -128 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 131072 * x3), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp7 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp12 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_17(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 18 x2 = xindex // 2304 % 18 x3 = xindex // 41472 x4 = xindex tmp0 = tl.load(in_ptr0 + (126720 + x0 + -8192 * tl_math.abs(-15 + tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 131072 * x3), None) tmp1 = tl.load(in_ptr0 + (126848 + x0 + -8192 * tl_math.abs(-15 + tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 131072 * x3), None) tmp3 = tl.load(in_ptr0 + (130816 + x0 + -8192 * tl_math.abs(-15 + tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 131072 * x3), None) tmp5 = tl.load(in_ptr0 + (130944 + x0 + -8192 * tl_math.abs(-15 + tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 131072 * x3), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x4, tmp6, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 18 x2 = xindex // 4608 % 18 x3 = xindex // 82944 x4 = xindex tmp0 = tl.load(in_ptr0 + (65280 + x0 + -4096 * tl_math.abs(-15 + tl_math.abs(-1 + x2)) + -256 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 65536 * x3), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + 
tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp7 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp12 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_21(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 10 x2 = xindex // 2560 % 10 x3 = xindex // 25600 x4 = xindex tmp0 = tl.load(in_ptr0 + (60928 + x0 + -8192 * tl_math.abs(-7 + tl_math .abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 65536 * x3), None) tmp1 = tl.load(in_ptr0 + (61184 + x0 + -8192 * tl_math.abs(-7 + tl_math .abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 65536 * x3), None) tmp3 = tl.load(in_ptr0 + (65024 + x0 + -8192 * tl_math.abs(-7 + tl_math .abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 65536 * x3), None) tmp5 = tl.load(in_ptr0 + (65280 + x0 + -8192 * tl_math.abs(-7 + tl_math .abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 65536 * x3), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x4, tmp6, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 % 10 x2 = xindex // 5120 % 10 x3 = xindex // 51200 x4 = xindex tmp0 = tl.load(in_ptr0 + (32256 + x0 + -4096 * tl_math.abs(-7 + tl_math .abs(-1 + x2)) + -512 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 32768 * x3), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_24(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 % 4 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None) tmp7 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None) tmp12 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_25(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 % 6 x2 = xindex // 3072 % 6 x3 = xindex // 18432 x4 = xindex tmp0 = tl.load(in_ptr0 + (27648 + x0 + -8192 * tl_math.abs(-3 + tl_math .abs(-1 + x2)) + -1024 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 32768 * x3), None) tmp1 = tl.load(in_ptr0 + (28160 + x0 + -8192 * tl_math.abs(-3 + tl_math .abs(-1 + x2)) + -1024 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 32768 * x3), None) tmp3 = tl.load(in_ptr0 + (31744 + x0 + -8192 * tl_math.abs(-3 + tl_math .abs(-1 + x2)) + -1024 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 32768 * x3), None) tmp5 = tl.load(in_ptr0 + (32256 + x0 + -8192 * tl_math.abs(-3 + tl_math .abs(-1 + x2)) + -1024 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 32768 * x3), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x4, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_26(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 8192 * y1), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_29(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_30(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), 
(12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_3, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(192, 9)](primals_4, buf1, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_6, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_8, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_10, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_12, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_16, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf8 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_18, buf8, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf9 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_20, buf9, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_28, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_28 buf14 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 3, 64, 64), (12288, 1, 192, 3)) buf15 = empty_strided_cuda((4, 3, 66, 66), (13068, 1, 198, 3), torch.float32) triton_poi_fused_convolution_reflection_pad2d_9[grid(52272)](buf14, primals_2, buf15, 52272, XBLOCK=512, num_warps=4, num_stages=1) del buf14 del primals_2 buf16 = extern_kernels.convolution(buf15, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf17 
= empty_strided_cuda((4, 64, 66, 66), (278784, 1, 4224, 64), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_10[grid(1115136)]( buf16, primals_5, buf17, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf17, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_11[grid(1048576)](buf19, primals_7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf20 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_12[grid(262144)](buf19, buf20, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf21 = empty_strided_cuda((4, 64, 34, 34), (73984, 1, 2176, 64), torch.float32) triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_13[grid( 295936)](buf19, buf21, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf21, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf23 = empty_strided_cuda((4, 128, 34, 34), (147968, 1, 4352, 128), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_14[grid(591872)]( buf22, primals_9, buf23, 591872, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_15[grid(524288)](buf25, primals_11, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_16[grid(131072)](buf25, buf26, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((4, 128, 18, 18), (41472, 1, 2304, 128), torch.float32) triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_17[grid( 165888)](buf25, buf27, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf27, buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf29 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_18[grid(331776)]( buf28, primals_13, buf29, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf30 = extern_kernels.convolution(buf29, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf31 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_18[grid(331776)]( buf30, primals_15, buf31, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf32 = extern_kernels.convolution(buf31, buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf33 = empty_strided_cuda((4, 256, 18, 18), (82944, 1, 4608, 256), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_18[grid(331776)]( buf32, 
primals_17, buf33, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf34 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_19[grid(262144)](buf35, primals_19, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf36 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_20[grid(65536)](buf35, buf36, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf37 = empty_strided_cuda((4, 256, 10, 10), (25600, 1, 2560, 256), torch.float32) triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_21[grid( 102400)](buf35, buf37, 102400, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf37, buf9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf39 = empty_strided_cuda((4, 512, 10, 10), (51200, 1, 5120, 512), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_22[grid(204800)]( buf38, primals_21, buf39, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf40 = extern_kernels.convolution(buf39, buf10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf41 = empty_strided_cuda((4, 512, 10, 10), (51200, 1, 5120, 512), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_22[grid(204800)]( buf40, primals_23, buf41, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf42 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf43 = empty_strided_cuda((4, 512, 10, 10), (51200, 1, 5120, 512), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_22[grid(204800)]( buf42, primals_25, buf43, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf44 = extern_kernels.convolution(buf43, buf12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_23[grid(131072)](buf45, primals_27, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf46 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_24[grid(32768)](buf45, buf46, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf47 = empty_strided_cuda((4, 512, 6, 6), (18432, 1, 3072, 512), torch.float32) triton_poi_fused_max_pool2d_with_indices_reflection_pad2d_25[grid( 73728)](buf45, buf47, 73728, XBLOCK=512, num_warps=8, num_stages=1) buf48 = extern_kernels.convolution(buf47, buf13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf49 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. 
float32) buf50 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_26[grid(2048, 16) ](buf48, primals_29, buf49, buf50, 2048, 16, XBLOCK=16, YBLOCK= 16, num_warps=4, num_stages=1) del buf48 del primals_29 buf51 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_27[grid(131072)]( buf42, primals_25, buf51, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del buf42 del primals_25 buf52 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_27[grid(131072)]( buf40, primals_23, buf52, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del buf40 del primals_23 buf53 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_27[grid(131072)]( buf38, primals_21, buf53, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del buf38 del primals_21 buf54 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_28[grid(262144)]( buf32, primals_17, buf54, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf32 del primals_17 buf55 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_28[grid(262144)]( buf30, primals_15, buf55, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf30 del primals_15 buf56 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_28[grid(262144)]( buf28, primals_13, buf56, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf28 del primals_13 buf57 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_29[grid(524288)]( buf22, primals_9, buf57, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf22 del primals_9 buf58 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_30[grid(1048576)]( buf16, primals_5, buf58, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf16 del primals_5 return (buf49, primals_1, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf26, buf27, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf45, buf46, buf47, buf50, buf51, buf52, buf53, buf54, buf55, buf56, buf57, buf58) class Encoder5New(nn.Module): def __init__(self, model=None, fixed=False): super(Encoder5New, self).__init__() self.fixed = fixed self.conv0 = nn.Conv2d(3, 3, 1, 1, 0) self.conv0.weight = nn.Parameter(torch.from_numpy(np.array([[[[0]], [[0]], [[255]]], [[[0]], [[255]], [[0]]], [[[255]], [[0]], [[0] ]]])).float()) self.conv0.bias = nn.Parameter(torch.from_numpy(np.array([-103.939, -116.779, -123.68])).float()) self.conv11 = nn.Conv2d(3, 64, 3, 1, 0) self.conv12 = nn.Conv2d(64, 64, 3, 1, 0) self.conv21 = nn.Conv2d(64, 128, 3, 1, 0) self.conv22 = nn.Conv2d(128, 128, 3, 1, 0) self.conv31 = nn.Conv2d(128, 256, 3, 1, 0) self.conv32 = nn.Conv2d(256, 256, 3, 1, 0) self.conv33 = nn.Conv2d(256, 256, 3, 1, 0) self.conv34 = nn.Conv2d(256, 256, 3, 1, 0) self.conv41 = nn.Conv2d(256, 512, 3, 1, 0) self.conv42 = nn.Conv2d(512, 512, 3, 1, 0) self.conv43 = nn.Conv2d(512, 512, 3, 1, 0) self.conv44 = nn.Conv2d(512, 512, 3, 1, 0) self.conv51 = nn.Conv2d(512, 512, 3, 1, 0) 
self.relu = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=False) self.pad = nn.ReflectionPad2d((1, 1, 1, 1)) if model: assert os.path.splitext(model)[1] in {'.t7', '.pth'} if model.endswith('.t7'): t7_model = load_lua(model) load_param(t7_model, 0, self.conv0) load_param(t7_model, 2, self.conv11) load_param(t7_model, 5, self.conv12) load_param(t7_model, 9, self.conv21) load_param(t7_model, 12, self.conv22) load_param(t7_model, 16, self.conv31) load_param(t7_model, 19, self.conv32) load_param(t7_model, 22, self.conv33) load_param(t7_model, 25, self.conv34) load_param(t7_model, 29, self.conv41) load_param(t7_model, 32, self.conv42) load_param(t7_model, 35, self.conv43) load_param(t7_model, 38, self.conv44) load_param(t7_model, 42, self.conv51) else: self.load_state_dict(torch.load(model, map_location=lambda storage, location: storage)) if fixed: for param in self.parameters(): param.requires_grad = False def forward_branch(self, input): out0 = self.conv0(input) out11 = self.relu(self.conv11(self.pad(out0))) out12 = self.relu(self.conv12(self.pad(out11))) out12 = self.pool(out12) out21 = self.relu(self.conv21(self.pad(out12))) out22 = self.relu(self.conv22(self.pad(out21))) out22 = self.pool(out22) out31 = self.relu(self.conv31(self.pad(out22))) out32 = self.relu(self.conv32(self.pad(out31))) out33 = self.relu(self.conv33(self.pad(out32))) out34 = self.relu(self.conv34(self.pad(out33))) out34 = self.pool(out34) out41 = self.relu(self.conv41(self.pad(out34))) out42 = self.relu(self.conv42(self.pad(out41))) out43 = self.relu(self.conv43(self.pad(out42))) out44 = self.relu(self.conv44(self.pad(out43))) out44 = self.pool(out44) out51 = self.relu(self.conv51(self.pad(out44))) return out11, out21, out31, out41, out51 def forward(self, input_0): primals_1 = self.conv0.weight primals_2 = self.conv0.bias primals_4 = self.conv11.weight primals_5 = self.conv11.bias primals_6 = self.conv12.weight primals_7 = self.conv12.bias primals_8 = self.conv21.weight primals_9 = self.conv21.bias primals_10 = self.conv22.weight primals_11 = self.conv22.bias primals_12 = self.conv31.weight primals_13 = self.conv31.bias primals_14 = self.conv32.weight primals_15 = self.conv32.bias primals_16 = self.conv33.weight primals_17 = self.conv33.bias primals_18 = self.conv34.weight primals_19 = self.conv34.bias primals_20 = self.conv41.weight primals_21 = self.conv41.bias primals_22 = self.conv42.weight primals_23 = self.conv42.bias primals_24 = self.conv43.weight primals_25 = self.conv43.bias primals_26 = self.conv44.weight primals_27 = self.conv44.bias primals_28 = self.conv51.weight primals_29 = self.conv51.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0]
MingSun-Tse/Collaborative-Distillation
Encoder5
false
14115
[ "MIT" ]
172
915712674af82ff91d926d922c14988cce0430f3
https://github.com/MingSun-Tse/Collaborative-Distillation/tree/915712674af82ff91d926d922c14988cce0430f3
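The generated Encoder5New keeps the eager forward_branch alongside the Triton-backed forward, so the two paths can be cross-checked on the shape the kernels are specialized for (batch 4, 3-channel 64x64 input, CUDA). A minimal sketch, assuming the full generated file's imports (torch, numpy) are in scope and a CUDA device is available:

import torch

# Both paths live on the same module instance, so they share the randomly initialized weights.
enc = Encoder5New().cuda()
x = torch.rand(4, 3, 64, 64, device='cuda')    # the compiled call() is specialized to this shape

with torch.no_grad():
    out_triton = enc(x)                        # Triton path, returns the relu5_1 feature map
    out_eager = enc.forward_branch(x)[-1]      # eager path, last element is also relu5_1

print(out_triton.shape)                        # torch.Size([4, 512, 4, 4])
print(torch.allclose(out_triton, out_eager, atol=1e-4))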
SincFilter
import torch import numpy as np import torch.utils.data import torch.nn as torch_nn class SincFilter(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters. If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one filter for low-pass sinc filter and another for high-pass filter. Example: import scipy import scipy.signal import numpy as np filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilter, self).__init__() self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 + 1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin( pi * x) / pi * x https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:, :, 0:self.half_k] = torch.sin(np.pi * x[:, :, 0:self.half_k]) / ( np.pi * x[:, :, 0:self.half_k]) y[:, :, self.half_k + 1:] = torch.sin(np.pi * x[:, :, self.half_k + 1:] ) / (np.pi * x[:, :, self.half_k + 1:]) y[:, :, self.half_k] = 1 return y def forward(self, cut_f): """ lp_coef, hp_coef = forward(self, cut_f) cut-off frequency cut_f (batchsize=1, length, dim = 1) lp_coef: low-pass filter coefs (batchsize, length, filter_order) hp_coef: high-pass filter coefs (batchsize, length, filter_order) """ with torch.no_grad(): lp_coef = torch.arange(-self.half_k, self.half_k + 1, device= cut_f.device) lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) hp_coef = torch.arange(-self.half_k, self.half_k + 1, device= cut_f.device) hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1) tmp_one = torch.pow(-1, hp_coef) lp_coef = cut_f * self.sinc(cut_f * lp_coef) * self.hamming_w(lp_coef) hp_coef = (self.sinc(hp_coef) - cut_f * self.sinc(cut_f * hp_coef) ) * self.hamming_w(hp_coef) lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1) hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1) lp_coef = lp_coef / lp_coef_norm hp_coef = hp_coef / hp_coef_norm return lp_coef, hp_coef def get_inputs(): return [torch.rand([4, 4, 3])] def get_init_inputs(): return [[], {'filter_order': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.utils.data import torch.nn as torch_nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_arange_copy_cos_div_fill_lift_fresh_mul_repeat_sin_sub_zeros_like_0( in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = x0 tmp2 = tl.full([1], 1, tl.int32) tmp3 = tmp1 == tmp2 tmp4 = tl.full([1], 2, tl.int64) tmp5 = tmp1 >= tmp4 tmp6 = tl.load(in_ptr0 + x2, tmp5 & xmask, other=0.0) tmp7 = -1 + x0 tmp8 = tmp7.to(tl.float32) tmp9 = tmp6 * tmp8 tmp10 = 3.141592653589793 tmp11 = tmp9 * tmp10 tmp12 = tl_math.sin(tmp11) tmp13 = tmp12 / tmp11 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp5, tmp13, tmp14) tmp16 = tl.full([1], 1, tl.int64) tmp17 = tmp1 < tmp16 tmp18 = tl.load(in_ptr0 + x2, tmp17 & xmask, other=0.0) tmp19 = tmp18 * tmp8 tmp20 = tmp19 * tmp10 tmp21 = tl_math.sin(tmp20) tmp22 = tmp21 / tmp20 tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp17, tmp22, tmp23) tmp25 = 0.0 tmp26 = tl.where(tmp17, tmp24, tmp25) tmp27 = tl.where(tmp5, tmp15, tmp26) tmp28 = 1.0 tmp29 = tl.where(tmp3, tmp28, tmp27) tmp30 = tmp0 * tmp29 tmp31 = 6.283185307179586 tmp32 = tmp8 * tmp31 tmp33 = 0.3333333333333333 tmp34 = tmp32 * tmp33 tmp35 = tl_math.cos(tmp34) tmp36 = 0.46 tmp37 = tmp35 * tmp36 tmp38 = 0.54 tmp39 = tmp37 + tmp38 tmp40 = tmp30 * tmp39 tmp41 = tmp8 * tmp10 tmp42 = tl_math.sin(tmp41) tmp43 = tmp42 / tmp41 tmp44 = tmp43.to(tl.int64) tmp45 = tl.full(tmp44.shape, 0, tmp44.dtype) tmp46 = tl.where(tmp5, tmp44, tmp45) tmp47 = tl.where(tmp17, tmp44, tmp45) tmp48 = tl.full([1], 0, tl.int64) tmp49 = tl.where(tmp17, tmp47, tmp48) tmp50 = tl.where(tmp5, tmp46, tmp49) tmp51 = tl.where(tmp3, tmp16, tmp50) tmp52 = tmp51.to(tl.float32) tmp53 = tmp52 - tmp30 tl.store(out_ptr0 + x2, tmp40, xmask) tl.store(out_ptr1 + x2, tmp53, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 3 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 3 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 3 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp6 = tmp0 / tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_arange_repeat_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x2 = xindex tmp0 = -1 + x0 tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_arange_cos_div_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp8 = 
tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (2 + 3 * x0), xmask, eviction_policy='evict_last' ) tmp1 = -2.0943951023931953 tmp2 = tl_math.cos(tmp1) tmp3 = 0.46 tmp4 = tmp2 * tmp3 tmp5 = 0.54 tmp6 = tmp4 + tmp5 tmp7 = tmp0 * tmp6 tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 * tmp9 tmp12 = 0.0 tmp13 = tl_math.cos(tmp12) tmp14 = tmp13 * tmp3 tmp15 = tmp14 + tmp5 tmp16 = tmp11 * tmp15 tmp18 = tmp17.to(tl.float32) tmp19 = tmp16 * tmp18 tmp20 = tmp10 + tmp19 tmp22 = 2.0943951023931953 tmp23 = tl_math.cos(tmp22) tmp24 = tmp23 * tmp3 tmp25 = tmp24 + tmp5 tmp26 = tmp21 * tmp25 tmp28 = tmp27.to(tl.float32) tmp29 = tmp26 * tmp28 tmp30 = tmp20 + tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_add_arange_cos_div_mul_repeat_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 3 x1 = xindex // 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp13 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = -1 + x0 tmp2 = tmp1.to(tl.float32) tmp3 = 6.283185307179586 tmp4 = tmp2 * tmp3 tmp5 = 0.3333333333333333 tmp6 = tmp4 * tmp5 tmp7 = tl_math.cos(tmp6) tmp8 = 0.46 tmp9 = tmp7 * tmp8 tmp10 = 0.54 tmp11 = tmp9 + tmp10 tmp12 = tmp0 * tmp11 tmp14 = tmp12 / tmp13 tl.store(in_out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 3), (12, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_arange_copy_cos_div_fill_lift_fresh_mul_repeat_sin_sub_zeros_like_0[ grid(48)](arg0_1, buf0, buf2, 48, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32) triton_poi_fused_div_1[grid(48)](buf0, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf0 buf3 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.int64) triton_poi_fused_arange_repeat_2[grid(48)](buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) buf4 = torch.ops.aten.pow.Scalar(-1, buf3) del buf3 buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_arange_cos_div_mul_repeat_sum_3[grid(16)](buf2, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 buf7 = buf2 del buf2 triton_poi_fused_add_arange_cos_div_mul_repeat_4[grid(48)](buf7, buf6, 48, XBLOCK=64, num_warps=1, num_stages=1) del buf6 return buf1, buf7 class SincFilterNew(torch_nn.Module): """ SincFilter Given the cut-off-frequency, produce the low-pass and high-pass windowed-sinc-filters. If input cut-off-frequency is (batchsize=1, signal_length, 1), output filter coef is (batchsize=1, signal_length, filter_order). For each time step in [1, signal_length), we calculate one filter for low-pass sinc filter and another for high-pass filter. 
Example: import scipy import scipy.signal import numpy as np filter_order = 31 cut_f = 0.2 sinc_layer = SincFilter(filter_order) lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f) w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1]) w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1]) plt.plot(w, 20*np.log10(np.abs(h1))) plt.plot(w, 20*np.log10(np.abs(h2))) plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0]) """ def __init__(self, filter_order): super(SincFilterNew, self).__init__() self.half_k = (filter_order - 1) // 2 self.order = self.half_k * 2 + 1 def hamming_w(self, n_index): """ prepare hamming window for each time step n_index (batchsize=1, signal_length, filter_order) For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2] n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2] ... output (batchsize=1, signal_length, filter_order) output[0, 0, :] = hamming_window output[0, 1, :] = hamming_window ... """ return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order) def sinc(self, x): """ Normalized sinc-filter sin( pi * x) / pi * x https://en.wikipedia.org/wiki/Sinc_function Assume x (batchsize, signal_length, filter_order) and x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order] x[:, :, self.half_order] -> time index = 0, sinc(0)=1 """ y = torch.zeros_like(x) y[:, :, 0:self.half_k] = torch.sin(np.pi * x[:, :, 0:self.half_k]) / ( np.pi * x[:, :, 0:self.half_k]) y[:, :, self.half_k + 1:] = torch.sin(np.pi * x[:, :, self.half_k + 1:] ) / (np.pi * x[:, :, self.half_k + 1:]) y[:, :, self.half_k] = 1 return y def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
Ninushkat/Impact-Synth-Hardware
SincFilter
false
14116
[ "MIT" ]
55
37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
https://github.com/Ninushkat/Impact-Synth-Hardware/tree/37a2ecfec51b052b39d1ad0d4676f09d5f00e3c2
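Since forward() divides each low-pass filter by its coefficient sum and each high-pass filter by its alternating-sign sum, the returned filters have unit gain at DC and at the Nyquist frequency respectively (the normalized sinc here is sin(pi*x)/(pi*x)). A small CPU-side check with the eager module, using the filter order and cut-off from the docstring example:

import torch

layer = SincFilter(filter_order=31)
cut_f = torch.ones(1, 10, 1) * 0.2                       # constant cut-off for every time step
lp_coef, hp_coef = layer(cut_f)                          # each (1, 10, 31)

n = torch.arange(-layer.half_k, layer.half_k + 1)        # [-15, ..., 0, ..., 15]
sign = 1.0 - 2.0 * (n.abs() % 2)                         # (-1)**n as a float tensor

print(torch.allclose(lp_coef.sum(-1), torch.ones(1, 10), atol=1e-5))           # unit DC gain
print(torch.allclose((hp_coef * sign).sum(-1), torch.ones(1, 10), atol=1e-5))  # unit Nyquist gain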
ScaledDotProductAttention
import torch import numpy as np from torch import nn from torch.nn import init class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 
16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](buf0, primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(64, 4)](buf1, primals_6, buf4, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_sqrt_2[grid(256)](buf5, buf6, 256, XBLOCK =128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_clone_0[grid(256)](buf2, primals_8, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0 ), primals_10, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) class ScaledDotProductAttentionNew(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttentionNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0, input_1, input_2): primals_3 = self.fc_q.weight primals_4 = self.fc_q.bias primals_5 = self.fc_k.weight primals_6 = self.fc_k.bias primals_7 = self.fc_v.weight primals_8 = self.fc_v.bias primals_10 = self.fc_o.weight primals_11 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11]) return output[0]
Nitin-Mane/External-Attention-pytorch
ScaledDotProductAttention
false
14117
[ "MIT" ]
4466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
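One hedged way to check the lowered module against the eager reference is to share a single state_dict and compare outputs in eval mode (dropout disabled), using the shapes the generated call() is specialized for (d_model=d_k=d_v=4, h=4, (4, 4, 4) inputs on CUDA); the tolerance below is an assumption, not a guarantee:

import torch

torch.manual_seed(0)
ref = ScaledDotProductAttention(d_model=4, d_k=4, d_v=4, h=4).cuda().eval()
opt = ScaledDotProductAttentionNew(d_model=4, d_k=4, d_v=4, h=4).cuda().eval()
opt.load_state_dict(ref.state_dict())          # identical fc_q / fc_k / fc_v / fc_o weights

q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
with torch.no_grad():
    print(torch.allclose(ref(q, k, v), opt(q, k, v), atol=1e-5))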
GetMask
import torch class GetMask(torch.nn.Module): """ inputs: x: any size outputs:mask: same size as input x """ def __init__(self, pad_idx=0): super(GetMask, self).__init__() self.pad_idx = pad_idx def forward(self, x): mask = torch.ne(x, self.pad_idx).float() return mask def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.float32) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_ne_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GetMaskNew(torch.nn.Module): """ inputs: x: any size outputs:mask: same size as input x """ def __init__(self, pad_idx=0): super(GetMaskNew, self).__init__() self.pad_idx = pad_idx def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NoteXYX/ACL2017
GetMask
false
14118
[ "Apache-2.0" ]
119
436f59f2aa0044a9d57c95a2a58b2158cb99738d
https://github.com/NoteXYX/ACL2017/tree/436f59f2aa0044a9d57c95a2a58b2158cb99738d
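The eager GetMask works on CPU tensors of any shape (the generated GetMaskNew path is specialized to the (4, 4, 4, 4) CUDA input from get_inputs()); a typical use is turning padded token ids into a float mask:

import torch

get_mask = GetMask(pad_idx=0)
token_ids = torch.tensor([[5, 3, 9, 0, 0],
                          [7, 2, 0, 0, 0]])
print(get_mask(token_ids))
# tensor([[1., 1., 1., 0., 0.],
#         [1., 1., 0., 0., 0.]])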
SpatialAttention
import torch from torch import nn class SpatialAttention(nn.Module): def __init__(self, kernel_size=7): super().__init__() self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding= kernel_size // 2) self.sigmoid = nn.Sigmoid() def forward(self, x): max_result, _ = torch.max(x, dim=1, keepdim=True) avg_result = torch.mean(x, dim=1, keepdim=True) result = torch.cat([max_result, avg_result], 1) output = self.conv(result) output = self.sigmoid(output) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 + tmp20 tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp21 + tmp22 tmp24 = 4.0 tmp25 = tmp23 / tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp14, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp13, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class SpatialAttentionNew(nn.Module): def __init__(self, kernel_size=7): super().__init__() self.conv = nn.Conv2d(2, 1, 
kernel_size=kernel_size, padding= kernel_size // 2) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Nitin-Mane/External-Attention-pytorch
SpatialAttention
false
14119
[ "MIT" ]
4466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
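The fused cat kernel builds the 2-channel [channel-max, channel-mean] map that feeds the 7x7 convolution, so with shared weights the compiled module should reproduce the eager one. A minimal check, assuming a CUDA device and the (4, 4, 4, 4) input shape the call() asserts:

import torch

torch.manual_seed(0)
ref = SpatialAttention(kernel_size=7).cuda().eval()
opt = SpatialAttentionNew(kernel_size=7).cuda().eval()
opt.load_state_dict(ref.state_dict())               # same 7x7 conv weight and bias

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-6))  # both return a (4, 1, 4, 4) sigmoid map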
UFOAttention
import torch from torch import nn from torch.nn import init def XNorm(x, gamma): norm_tensor = torch.norm(x, 2, -1, True) return x * gamma / norm_tensor class UFOAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(UFOAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.gamma = nn.Parameter(torch.randn((1, h, 1, 1))) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values): b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) kv = torch.matmul(k, v) kv_norm = XNorm(kv, self.gamma) q_norm = XNorm(q, self.gamma) out = torch.matmul(q_norm, kv_norm).permute(0, 2, 1, 3).contiguous( ).view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x2 = xindex // 16 % 4 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + x4, tmp15, xmask) @triton.jit def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x5 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp13 = 
tmp10 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp2 / tmp14 tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp15, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_6, buf3, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clone_1[grid(256)](buf2, primals_8, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_linalg_vector_norm_mul_2[grid(256)](buf5, primals_10, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_div_linalg_vector_norm_mul_3[grid(256)](buf0, primals_10, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf8 buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), 
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf10) del primals_12 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0 ), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0) def XNorm(x, gamma): norm_tensor = torch.norm(x, 2, -1, True) return x * gamma / norm_tensor class UFOAttentionNew(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(UFOAttentionNew, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.gamma = nn.Parameter(torch.randn((1, h, 1, 1))) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, input_0, input_1, input_2): primals_10 = self.gamma primals_3 = self.fc_q.weight primals_4 = self.fc_q.bias primals_5 = self.fc_k.weight primals_6 = self.fc_k.bias primals_7 = self.fc_v.weight primals_8 = self.fc_v.bias primals_11 = self.fc_o.weight primals_12 = self.fc_o.bias primals_1 = input_0 primals_2 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
Nitin-Mane/External-Attention-pytorch
UFOAttention
false
14120
[ "MIT" ]
4466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
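XNorm divides each last-dimension vector by its L2 norm and scales by gamma, so away from zero-norm vectors it coincides with gamma * F.normalize(x, dim=-1). A small CPU check of that identity (the shapes here are illustrative, not required by the module):

import torch
import torch.nn.functional as F

x = torch.rand(2, 4, 3, 5)                     # any shape works; gamma broadcasts over dims 0, 2, 3
gamma = torch.randn(1, 4, 1, 1)
print(torch.allclose(XNorm(x, gamma), gamma * F.normalize(x, p=2.0, dim=-1), atol=1e-6))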
NoopLoss
from torch.nn import Module import functools import torch import torch.utils.data import torch.nn as nn from torchvision.models import * import torch.nn.init class NoopLoss(Module): """Just returns the mean of the `output`.""" def forward(self, output, *args): return output.mean() class PrePostInitMeta(type): """A metaclass that calls optional `__pre_init__` and `__post_init__` methods""" def __new__(cls, name, bases, dct): x = super().__new__(cls, name, bases, dct) old_init = x.__init__ def _pass(self): pass @functools.wraps(old_init) def _init(self, *args, **kwargs): self.__pre_init__() old_init(self, *args, **kwargs) self.__post_init__() x.__init__ = _init if not hasattr(x, '__pre_init__'): x.__pre_init__ = _pass if not hasattr(x, '__post_init__'): x.__post_init__ = _pass return x class Module(nn.Module, metaclass=PrePostInitMeta): """Same as `nn.Module`, but no need for subclasses to call `super().__init__`""" def __pre_init__(self): super().__init__() def __init__(self): pass def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import functools import torch.utils.data import torch.nn as nn from torchvision.models import * import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class NoopLossNew(Module): """Just returns the mean of the `output`.""" def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0] class PrePostInitMeta(type): """A metaclass that calls optional `__pre_init__` and `__post_init__` methods""" def __new__(cls, name, bases, dct): x = super().__new__(cls, name, bases, dct) old_init = x.__init__ def _pass(self): pass @functools.wraps(old_init) def _init(self, *args, **kwargs): self.__pre_init__() old_init(self, *args, **kwargs) self.__post_init__() x.__init__ = _init if not hasattr(x, '__pre_init__'): x.__pre_init__ = _pass if not hasattr(x, '__post_init__'): x.__post_init__ = _pass return x class Module(nn.Module, metaclass=PrePostInitMeta): """Same as `nn.Module`, but no need for subclasses to call `super().__init__`""" def __pre_init__(self): super().__init__() def __init__(self): pass
JiahuaWU/fastai
NoopLoss
false
14121
[ "Apache-2.0" ]
59
13a2df812d875abf0558004283392ab40d9bdea1
https://github.com/JiahuaWU/fastai/tree/13a2df812d875abf0558004283392ab40d9bdea1
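NoopLoss ignores any target arguments and simply averages the prediction tensor, e.g.:

import torch

print(NoopLoss()(torch.tensor([[1.0, 3.0]]), None))   # tensor(2.)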
ShuffleBlock
import torch import torch.nn as nn class ShuffleBlock(nn.Module): def __init__(self, groups=2): super(ShuffleBlock, self).__init__() self.groups = groups def forward(self, x): """Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]""" N, C, H, W = x.size() g = self.groups return x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 2 x2 = xindex // 32 % 2 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 32 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 2, 4, 4), (64, 32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), class ShuffleBlockNew(nn.Module): def __init__(self, groups=2): super(ShuffleBlockNew, self).__init__() self.groups = groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
P2333/Bag-of-Tricks-for-AT
ShuffleBlock
false
14,122
[ "Apache-2.0" ]
192
314683adcfe9ea7c7bfbff50007da510b21f56e1
https://github.com/P2333/Bag-of-Tricks-for-AT/tree/314683adcfe9ea7c7bfbff50007da510b21f56e1
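The ShuffleBlock record lends itself to a quick equivalence check: both class definitions appear above, the operation is pure data movement, and the compiled clone kernel assumes the same groups=2 layout as the sample input. A sketch, assuming a CUDA device:

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
ref = ShuffleBlock(groups=2)       # eager channel shuffle via view/permute/reshape
opt = ShuffleBlockNew(groups=2)    # Triton clone kernel from this record
assert torch.allclose(ref(x), opt(x))   # no arithmetic involved, so values should agree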
Attention
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class Attention(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: :param out_dim: :param n_head: num of head (Multi-Head Attention) :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot) :return (?, q_len, out_dim,) """ super(Attention, self).__init__() if hidden_dim is None: hidden_dim = embed_dim // n_head if out_dim is None: out_dim = embed_dim self.embed_dim = embed_dim self.hidden_dim = hidden_dim self.n_head = n_head self.score_function = score_function self.w_k = nn.Linear(embed_dim, n_head * hidden_dim) self.w_q = nn.Linear(embed_dim, n_head * hidden_dim) self.proj = nn.Linear(n_head * hidden_dim, out_dim) self.dropout = nn.Dropout(dropout) if score_function == 'mlp': self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2)) elif self.score_function == 'bi_linear': self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_dim) if self.weight is not None: self.weight.data.uniform_(-stdv, stdv) def forward(self, k, q): if len(q.shape) == 2: q = torch.unsqueeze(q, dim=1) if len(k.shape) == 2: k = torch.unsqueeze(k, dim=1) mb_size = k.shape[0] k_len = k.shape[1] q_len = q.shape[1] kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim) kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self. hidden_dim) qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim) qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self. hidden_dim) if self.score_function == 'dot_product': kt = kx.permute(0, 2, 1) score = torch.bmm(qx, kt) elif self.score_function == 'scaled_dot_product': kt = kx.permute(0, 2, 1) qkt = torch.bmm(qx, kt) score = torch.div(qkt, math.sqrt(self.hidden_dim)) elif self.score_function == 'mlp': kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1) qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1) kq = torch.cat((kxx, qxx), dim=-1) score = torch.tanh(torch.matmul(kq, self.weight)) elif self.score_function == 'bi_linear': qw = torch.matmul(qx, self.weight) kt = kx.permute(0, 2, 1) score = torch.bmm(qw, kt) else: raise RuntimeError('invalid score_function') score = F.softmax(score, dim=0) output = torch.bmm(score, kx) output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1) output = self.proj(output) output = self.dropout(output) return output, score def get_inputs(): return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])] def get_init_inputs(): return [[], {'embed_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, 
XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: :param out_dim: :param n_head: num of head (Multi-Head Attention) :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot) :return (?, q_len, out_dim,) """ super(AttentionNew, self).__init__() if hidden_dim is None: hidden_dim = embed_dim // n_head if out_dim is None: out_dim = embed_dim self.embed_dim = embed_dim self.hidden_dim = hidden_dim self.n_head = n_head self.score_function = score_function self.w_k = nn.Linear(embed_dim, n_head * hidden_dim) self.w_q = nn.Linear(embed_dim, n_head * hidden_dim) self.proj = nn.Linear(n_head * hidden_dim, out_dim) self.dropout = nn.Dropout(dropout) if score_function == 'mlp': self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2)) elif self.score_function == 'bi_linear': self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_dim) if self.weight is not None: self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0, input_1): primals_3 = self.w_k.weight primals_4 = self.w_k.bias primals_5 = self.w_q.weight primals_6 = self.w_q.bias primals_7 = self.proj.weight primals_8 = self.proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
NouamaneTazi/conv-emotion
Attention
false
14,123
[ "MIT" ]
488
0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
https://github.com/NouamaneTazi/conv-emotion/tree/0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
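To compare the eager Attention module with the compiled AttentionNew above, the two instances need identical weights, since each constructor initializes its own Linear layers; copying the state dict makes the check meaningful. A sketch under the record's default settings (dot_product scoring, dropout=0), assuming CUDA and a guessed tolerance:

import torch

k = torch.rand([4, 4, 1, 4], device='cuda')
q = torch.rand([4, 4, 1, 4], device='cuda')
ref = Attention(embed_dim=4).cuda().eval()
opt = AttentionNew(embed_dim=4).cuda().eval()
opt.load_state_dict(ref.state_dict())          # share w_k / w_q / proj weights
out_ref, score_ref = ref(k, q)
out_opt, score_opt = opt(k, q)
assert torch.allclose(out_ref, out_opt, atol=1e-5)
assert torch.allclose(score_ref, score_opt, atol=1e-5)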
SimpleAttention
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class SimpleAttention(nn.Module): def __init__(self, input_dim): super(SimpleAttention, self).__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, M, x=None): """ M -> (seq_len, batch, vector) x -> dummy argument for the compatibility with MatchingAttention """ scale = self.scalar(M) alpha = F.softmax(scale, dim=0).permute(1, 2, 0) attn_pool = torch.bmm(alpha, M.transpose(0, 1))[:, 0, :] return attn_pool, alpha def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4, 4), (4, 16, 1), 0), out =buf3) return reinterpret_tensor(buf3, (4, 4), (4, 1), 0), reinterpret_tensor(buf2 , (4, 1, 4), (1, 1, 4), 0), primals_2, buf2 class SimpleAttentionNew(nn.Module): def __init__(self, input_dim): super(SimpleAttentionNew, self).__init__() self.input_dim = input_dim self.scalar = nn.Linear(self.input_dim, 1, bias=False) def forward(self, input_0): primals_1 = self.scalar.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
NouamaneTazi/conv-emotion
SimpleAttention
false
14,124
[ "MIT" ]
488
0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
https://github.com/NouamaneTazi/conv-emotion/tree/0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
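A similar sketch for SimpleAttention: the only learnable tensor is the scalar projection, so syncing state dicts and comparing both return values (pooled vector and attention weights) exercises the record's two Triton softmax kernels end to end. Assumes CUDA; the tolerance is a guess.

import torch

M = torch.rand([4, 4, 4], device='cuda')
ref = SimpleAttention(input_dim=4).cuda()
opt = SimpleAttentionNew(input_dim=4).cuda()
opt.load_state_dict(ref.state_dict())          # reuse the same scalar.weight
pool_ref, alpha_ref = ref(M)
pool_opt, alpha_opt = opt(M)
assert torch.allclose(pool_ref, pool_opt, atol=1e-5)
assert torch.allclose(alpha_ref, alpha_opt, atol=1e-5)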
MaskedMSELoss
import torch import torch.nn as nn import torch.optim class MaskedMSELoss(nn.Module): def __init__(self): super(MaskedMSELoss, self).__init__() self.loss = nn.MSELoss(reduction='sum') def forward(self, pred, target, mask): """ pred -> batch*seq_len target -> batch*seq_len mask -> batch*seq_len """ loss = self.loss(pred * mask, target) / torch.sum(mask) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mse_loss_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp3 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tmp8 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mse_loss_mul_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class MaskedMSELossNew(nn.Module): def __init__(self): super(MaskedMSELossNew, self).__init__() self.loss = nn.MSELoss(reduction='sum') def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
NouamaneTazi/conv-emotion
MaskedMSELoss
false
14,125
[ "MIT" ]
488
0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
https://github.com/NouamaneTazi/conv-emotion/tree/0c9dcb9cc5234a7ca8cf6af81aabe28ef3814d0e
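MaskedMSELoss is stateless, so no weight syncing is needed; a binary mask (rather than the uniform noise returned by the record's get_inputs()) makes the normalization term easier to reason about. Sketch, assuming CUDA:

import torch

pred = torch.rand([4, 4, 4, 4], device='cuda')
target = torch.rand([4, 4, 4, 4], device='cuda')
mask = (torch.rand([4, 4, 4, 4], device='cuda') > 0.5).float()   # illustrative 0/1 mask
ref = MaskedMSELoss()
opt = MaskedMSELossNew()
# both compute sum((pred*mask - target)**2) / sum(mask); the compiled version fuses it into one reduction
assert torch.allclose(ref(pred, target, mask), opt(pred, target, mask), atol=1e-5)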
GraphAttentionLayer
import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, adj): Wh = torch.bmm(h, self.W.repeat(h.size(0), 1, 1)) a_input = self._prepare_attentional_mechanism_input(Wh) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=2) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, Wh) h_prime = F.leaky_relu(h_prime) return h_prime, attention def _prepare_attentional_mechanism_input(self, Wh): N = Wh.size()[1] Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=1) Wh_repeated_alternating = Wh.repeat([1, N, 1]) all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=2) return all_combinations_matrix.view(Wh.size(0), N, N, 2 * self. out_features) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4, 'dropout': 0.5, 'alpha': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + 16 * x2 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 
tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_out_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_leaky_relu_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_2, buf0, out=buf1) buf2 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf0, (64, 1), (1, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0), primals_3, out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_2[grid(64)](primals_4, buf5, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_4 buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf5, buf4, buf3, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(64)](buf8, buf5, buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) 
del buf6 del buf7 buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf8, buf1, out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_5[grid(64)](buf9, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf9 return buf11, buf8, buf4, buf5, buf8, buf10, reinterpret_tensor(buf1, ( 4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (8, 64), (1, 8), 0 ), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0 ), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0) class GraphAttentionLayerNew(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayerNew, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def _prepare_attentional_mechanism_input(self, Wh): N = Wh.size()[1] Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=1) Wh_repeated_alternating = Wh.repeat([1, N, 1]) all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=2) return all_combinations_matrix.view(Wh.size(0), N, N, 2 * self. out_features) def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.a primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
Nmegha2601/activitygraph_transformer
GraphAttentionLayer
false
14,126
[ "MIT" ]
63
4e21a4ea12527df470b7586d149fa4168a41307c
https://github.com/Nmegha2601/activitygraph_transformer/tree/4e21a4ea12527df470b7586d149fa4168a41307c
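For the GAT layer, dropout (p=0.5) is the main caveat: the compiled call() graph contains no dropout, so a fair comparison puts the eager layer in eval() mode; W and a are Xavier-initialized per instance and have to be copied across. A sketch, assuming CUDA, with a hypothetical binary adjacency and a guessed tolerance:

import torch

h = torch.rand([4, 4, 4], device='cuda')
adj = (torch.rand([4, 4, 4], device='cuda') > 0.5).float()   # illustrative 0/1 adjacency
ref = GraphAttentionLayer(4, 4, dropout=0.5, alpha=4).cuda().eval()
opt = GraphAttentionLayerNew(4, 4, dropout=0.5, alpha=4).cuda().eval()
opt.load_state_dict(ref.state_dict())          # copy W and a
h_ref, att_ref = ref(h, adj)
h_opt, att_opt = opt(h, adj)
assert torch.allclose(h_ref, h_opt, atol=1e-4)
assert torch.allclose(att_ref, att_opt, atol=1e-4)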
MaskedSoftmax
import torch from torch.nn import functional as F import torch.utils.data import torch.nn as nn class MaskedSoftmax(nn.Module): def __init__(self, dim): super(MaskedSoftmax, self).__init__() self.dim = dim def forward(self, logit, mask=None): if mask is None: dist = F.softmax(logit - torch.max(logit, dim=self.dim, keepdim =True)[0], dim=self.dim) else: dist_ = F.softmax(logit - torch.max(logit, dim=self.dim, keepdim=True)[0], dim=self.dim) * mask normalization_factor = dist_.sum(self.dim, keepdim=True) dist = dist_ / normalization_factor return dist def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_sub_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_2[grid(1024)](buf1, buf2, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, class MaskedSoftmaxNew(nn.Module): def __init__(self, dim): super(MaskedSoftmaxNew, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 
output = call([arg0_1]) return output[0]
Nullius-2020/TAKG-Paddle
MaskedSoftmax
false
14,127
[ "MIT" ]
130
7ebb5c4cdd1d2c68b1ca4a518b73c5e815fc5812
https://github.com/Nullius-2020/TAKG-Paddle/tree/7ebb5c4cdd1d2c68b1ca4a518b73c5e815fc5812
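The compiled MaskedSoftmaxNew only covers the mask=None branch (its forward takes a single tensor), so the check below exercises exactly that path with the record's dim=4 on the 5-D sample shape. Assumes CUDA:

import torch

x = torch.rand([4, 4, 4, 4, 4], device='cuda')
ref = MaskedSoftmax(dim=4)                     # eager: softmax(x - max(x, dim)) over dim
opt = MaskedSoftmaxNew(dim=4)                  # fused max-subtract + softmax kernels
assert torch.allclose(ref(x), opt(x), atol=1e-6)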
DAModule
import torch import numpy as np from torch import nn from torch.nn import init class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class PositionAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = ScaledDotProductAttention(d_model, d_k=d_model, d_v= d_model, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1).permute(0, 2, 1) y = self.pa(y, y, y) return y class SimplifiedScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttention, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, 
attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class ChannelAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = SimplifiedScaledDotProductAttention(H * W, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1) y = self.pa(y, y, y) return y class DAModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.position_attention_module = PositionAttentionModule(d_model= 512, kernel_size=3, H=7, W=7) self.channel_attention_module = ChannelAttentionModule(d_model=512, kernel_size=3, H=7, W=7) def forward(self, input): bs, c, h, w = input.shape p_out = self.position_attention_module(input) c_out = self.channel_attention_module(input) p_out = p_out.permute(0, 2, 1).view(bs, c, h, w) c_out = c_out.view(bs, c, h, w) return p_out + c_out def get_inputs(): return [torch.rand([4, 512, 1, 49])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused__softmax_sqrt_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 196 rnumel = 49 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 49 x3 = xindex // 49 tmp0 = tl.load(in_ptr0 + (r1 + 49 * x0), rmask & xmask, other=0.0) tmp1 = tl.full([1, 1], 22.62741699796952, tl.float64) tmp2 = tl.full([1, 1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(rmask & xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tmp6.to(tl.float64) tmp14 = tmp13 * tmp1 tmp15 = tmp14.to(tl.float32) tmp16 = tmp12 / tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(rmask & xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp17 / tmp21 tl.store(out_ptr2 + (r1 + 49 * x2 + 2432 * x3), tmp22, rmask & xmask) @triton.jit def triton_per_fused__softmax_sqrt_4(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, 
tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.full([1], 7.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp8, 0)) tmp11 = tmp7 - tmp10 tmp12 = tmp6.to(tl.float64) tmp13 = tmp12 * tmp1 tmp14 = tmp13.to(tl.float32) tmp15 = tmp11 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp16 / tmp19 tl.store(out_ptr2 + (r1 + 512 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 196 xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 49 y1 = yindex // 49 tmp0 = tl.load(in_out_ptr0 + (x2 + 512 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (y0 + 49 * x2 + 25088 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + 512 * y3), tmp6, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 512, 1, 49), (25088, 49, 49, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512), (512, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (512, 512), (512, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (49, 49), (49, 1)) assert_size_stride(primals_15, (49,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 1, 49), (25088, 1, 25088, 512), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 49)](primals_1, buf0, 2048, 49, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_2, buf1, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_12, buf2, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 512, 1, 
49), (25088, 1, 25088, 512)) buf4 = reinterpret_tensor(buf3, (4, 49, 512), (25088, 512, 1), 0) del buf3 triton_poi_fused_clone_2[grid(100352)](buf4, primals_3, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((196, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (196, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out=buf5) buf6 = empty_strided_cuda((196, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (196, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 512), (1, 512), 0), out=buf6) buf7 = empty_strided_cuda((196, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (196, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 512), (1, 512), 0), out=buf7) buf8 = reinterpret_tensor(buf5, (4, 49, 512), (25088, 512, 1), 0) del buf5 triton_poi_fused_clone_2[grid(100352)](buf8, primals_5, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf9 = reinterpret_tensor(buf6, (4, 49, 512), (25088, 512, 1), 0) del buf6 triton_poi_fused_clone_2[grid(100352)](buf9, primals_7, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 49, 49), (2401, 49, 1), torch.float32) extern_kernels.bmm(buf8, reinterpret_tensor(buf9, (4, 512, 49), ( 25088, 1, 512), 0), out=buf10) buf13 = empty_strided_cuda((4, 1, 49, 49), (2432, 49, 49, 1), torch .float32) triton_per_fused__softmax_sqrt_3[grid(196)](buf10, buf13, 196, 49, XBLOCK=1, num_warps=2, num_stages=1) del buf10 buf14 = reinterpret_tensor(buf7, (4, 49, 512), (25088, 512, 1), 0) del buf7 triton_poi_fused_clone_2[grid(100352)](buf14, primals_9, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((4, 49, 512), (25088, 512, 1), torch.float32 ) extern_kernels.bmm(reinterpret_tensor(buf13, (4, 49, 49), (2432, 49, 1), 0), buf14, out=buf15) buf16 = empty_strided_cuda((196, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf15, (196, 512), (512, 1), 0 ), reinterpret_tensor(primals_10, (512, 512), (1, 512), 0), out =buf16) buf17 = extern_kernels.convolution(buf0, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 512, 1, 49), (25088, 1, 25088, 512)) buf18 = buf17 del buf17 triton_poi_fused_clone_2[grid(100352)](buf18, primals_13, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf19 = empty_strided_cuda((4, 512, 512), (262144, 512, 1), torch. 
float32) extern_kernels.bmm(reinterpret_tensor(buf18, (4, 512, 49), (25088, 1, 512), 0), reinterpret_tensor(buf18, (4, 49, 512), (25088, 512, 1), 0), out=buf19) buf22 = empty_strided_cuda((4, 1, 512, 512), (262144, 1, 512, 1), torch.float32) triton_per_fused__softmax_sqrt_4[grid(2048)](buf19, buf22, 2048, 512, num_warps=4, num_stages=1) del buf19 buf23 = empty_strided_cuda((4, 512, 49), (25088, 49, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf22, (4, 512, 512), (262144, 512, 1), 0), reinterpret_tensor(buf18, (4, 512, 49), (25088, 1, 512), 0), out=buf23) buf24 = empty_strided_cuda((2048, 49), (49, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf23, (2048, 49), (49, 1), 0), reinterpret_tensor(primals_14, (49, 49), (1, 49), 0), out=buf24) buf25 = reinterpret_tensor(buf16, (4, 512, 1, 49), (25088, 1, 25088, 512), 0) del buf16 triton_poi_fused_add_5[grid(196, 512)](buf25, primals_11, buf24, primals_15, 196, 512, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1) del buf24 del primals_11 del primals_15 return buf25, buf0, buf1, buf2, reinterpret_tensor(buf4, (196, 512), ( 512, 1), 0), buf13, reinterpret_tensor(buf15, (196, 512), (512, 1), 0 ), buf18, buf22, reinterpret_tensor(buf23, (2048, 49), (49, 1), 0 ), primals_14, primals_10, reinterpret_tensor(buf14, (4, 512, 49), (25088, 1, 512), 0), reinterpret_tensor(buf8, (4, 512, 49), (25088, 1, 512), 0), buf9, primals_8, primals_6, primals_4 class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). 
:return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class PositionAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = ScaledDotProductAttention(d_model, d_k=d_model, d_v= d_model, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1).permute(0, 2, 1) y = self.pa(y, y, y) return y class SimplifiedScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(SimplifiedScaledDotProductAttention, self).__init__() self.d_model = d_model self.d_k = d_model // h self.d_v = d_model // h self.h = h self.fc_o = nn.Linear(h * self.d_v, d_model) self.dropout = nn.Dropout(dropout) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). 
:return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class ChannelAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = SimplifiedScaledDotProductAttention(H * W, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1) y = self.pa(y, y, y) return y class DAModuleNew(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.position_attention_module = PositionAttentionModule(d_model= 512, kernel_size=3, H=7, W=7) self.channel_attention_module = ChannelAttentionModule(d_model=512, kernel_size=3, H=7, W=7) def forward(self, input_0): primals_2 = self.position_attention_module.cnn.weight primals_3 = self.position_attention_module.cnn.bias primals_4 = self.position_attention_module.pa.fc_q.weight primals_5 = self.position_attention_module.pa.fc_q.bias primals_6 = self.position_attention_module.pa.fc_k.weight primals_7 = self.position_attention_module.pa.fc_k.bias primals_8 = self.position_attention_module.pa.fc_v.weight primals_9 = self.position_attention_module.pa.fc_v.bias primals_10 = self.position_attention_module.pa.fc_o.weight primals_11 = self.position_attention_module.pa.fc_o.bias primals_12 = self.channel_attention_module.cnn.weight primals_13 = self.channel_attention_module.cnn.bias primals_14 = self.channel_attention_module.pa.fc_o.weight primals_15 = self.channel_attention_module.pa.fc_o.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
Nitin-Mane/External-Attention-pytorch
DAModule
false
14,128
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
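DAModule stacks a position-attention and a channel-attention branch, each with its own conv and linear parameters plus dropout(0.1), so the same caveats apply as in the sketches above: copy the state dict and compare in eval() mode, and expect only approximate agreement since the compiled softmax routes its scale through a float64 constant. A rough sketch, assuming CUDA; the tolerance is a guess.

import torch

x = torch.rand([4, 512, 1, 49], device='cuda')
ref = DAModule().cuda().eval()
opt = DAModuleNew().cuda().eval()
opt.load_state_dict(ref.state_dict())          # conv weights plus all attention projections
assert torch.allclose(ref(x), opt(x), atol=1e-3)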
Attention
import torch import torch.nn as nn def masked_softmax(x, m=None, axis=-1): """ Softmax with mask (optional) """ x = torch.clamp(x, min=-15.0, max=15.0) if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-06) return softmax class TimeDistributedDense(torch.nn.Module): """ input: x: batch x time x a mask: batch x time output: y: batch x time x b """ def __init__(self, mlp): super(TimeDistributedDense, self).__init__() self.mlp = mlp def forward(self, x, mask=None): x_size = x.size() x = x.view(-1, x_size[-1]) y = self.mlp.forward(x) y = y.view(x_size[:-1] + (y.size(-1),)) if mask is not None: y = y * mask.unsqueeze(-1) return y class Attention(nn.Module): def __init__(self, enc_dim, trg_dim, method='general'): super(Attention, self).__init__() self.method = method if self.method == 'general': self.attn = nn.Linear(enc_dim, trg_dim) elif self.method == 'concat': attn = nn.Linear(enc_dim + trg_dim, trg_dim) v = nn.Linear(trg_dim, 1) self.attn = TimeDistributedDense(mlp=attn) self.v = TimeDistributedDense(mlp=v) self.softmax = nn.Softmax() if self.method == 'dot': self.linear_out = nn.Linear(2 * trg_dim, trg_dim, bias=False) else: self.linear_out = nn.Linear(enc_dim + trg_dim, trg_dim, bias=False) self.tanh = nn.Tanh() def score(self, hiddens, encoder_outputs, encoder_mask=None): """ :param hiddens: (batch, trg_len, trg_hidden_dim) :param encoder_outputs: (batch, src_len, src_hidden_dim) :return: energy score (batch, trg_len, src_len) """ if self.method == 'dot': energies = torch.bmm(hiddens, encoder_outputs.transpose(1, 2)) elif self.method == 'general': energies = self.attn(encoder_outputs) if encoder_mask is not None: energies = energies * encoder_mask.view(encoder_mask.size(0 ), encoder_mask.size(1), 1) energies = torch.bmm(hiddens, energies.transpose(1, 2)) elif self.method == 'concat': energies = [] encoder_outputs.size(0) src_len = encoder_outputs.size(1) for i in range(hiddens.size(1)): hidden_i = hiddens[:, i:i + 1, :].expand(-1, src_len, -1) concated = torch.cat((hidden_i, encoder_outputs), 2) if encoder_mask is not None: concated = concated * encoder_mask.view(encoder_mask. 
size(0), encoder_mask.size(1), 1) energy = self.tanh(self.attn(concated, encoder_mask)) if encoder_mask is not None: energy = energy * encoder_mask.view(encoder_mask.size(0 ), encoder_mask.size(1), 1) energy = self.v(energy, encoder_mask).squeeze(-1) energies.append(energy) energies = torch.stack(energies, dim=1) if encoder_mask is not None: energies = energies * encoder_mask.view(encoder_mask.size(0 ), 1, encoder_mask.size(1)) return energies.contiguous() def forward(self, hidden, encoder_outputs, encoder_mask=None): """ Compute the attention and h_tilde, inputs/outputs must be batch first :param hidden: (batch_size, trg_len, trg_hidden_dim) :param encoder_outputs: (batch_size, src_len, trg_hidden_dim), if this is dot attention, you have to convert enc_dim to as same as trg_dim first :return: h_tilde (batch_size, trg_len, trg_hidden_dim) attn_weights (batch_size, trg_len, src_len) attn_energies (batch_size, trg_len, src_len): the attention energies before softmax """ """ # Create variable to store attention energies attn_energies = Variable(torch.zeros(encoder_outputs.size(0), encoder_outputs.size(1))) # src_seq_len * batch_size if torch.cuda.is_available(): attn_energies = attn_energies.cuda() # Calculate energies for each encoder output for i in range(encoder_outputs.size(0)): attn_energies[i] = self.score(hidden, encoder_outputs[i]) # Normalize energies to weights in range 0 to 1, transpose to (batch_size * src_seq_len) attn = torch.nn.functional.softmax(attn_energies.t()) # get the weighted context, (batch_size, src_layer_number * src_encoder_dim) weighted_context = torch.bmm(encoder_outputs.permute(1, 2, 0), attn.unsqueeze(2)).squeeze(2) # (batch_size, src_hidden_dim * num_directions) """ batch_size = hidden.size(0) src_len = encoder_outputs.size(1) trg_len = hidden.size(1) context_dim = encoder_outputs.size(2) trg_hidden_dim = hidden.size(2) attn_energies = self.score(hidden, encoder_outputs) if encoder_mask is None: attn_weights = torch.nn.functional.softmax(attn_energies.view(- 1, src_len), dim=1).view(batch_size, trg_len, src_len) else: attn_energies = attn_energies * encoder_mask.view(encoder_mask. size(0), 1, encoder_mask.size(1)) attn_weights = masked_softmax(attn_energies, encoder_mask.view( encoder_mask.size(0), 1, encoder_mask.size(1)), -1) weighted_context = torch.bmm(attn_weights, encoder_outputs) h_tilde = torch.cat((weighted_context, hidden), 2) h_tilde = self.tanh(self.linear_out(h_tilde.view(-1, context_dim + trg_hidden_dim))) return h_tilde.view(batch_size, trg_len, trg_hidden_dim ), attn_weights, attn_energies def forward_(self, hidden, context): """ Original forward for DotAttention, it doesn't work if the dim of encoder and decoder are not same input and context must be in same dim: return Softmax(hidden.dot([c for c in context])) input: batch x hidden_dim context: batch x source_len x hidden_dim """ target = self.linear_in(hidden).unsqueeze(2) attn = torch.bmm(context, target).squeeze(2) attn = self.softmax(attn) attn3 = attn.view(attn.size(0), 1, attn.size(1)) weighted_context = torch.bmm(attn3, context).squeeze(1) h_tilde = torch.cat((weighted_context, hidden), 1) h_tilde = self.tanh(self.linear_out(h_tilde)) return h_tilde, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'enc_dim': 4, 'trg_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = tmp1 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp2 tl.store(in_out_ptr0 + x0, tmp1, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 1, 4), 0), out=buf1) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf4) buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](buf4, primals_1, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0) del buf4 extern_kernels.mm(reinterpret_tensor(buf5, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf6) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) return reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), buf1, primals_2, buf3, reinterpret_tensor(buf5, (16, 8), (8, 1), 0 ), buf8, primals_5, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0) def masked_softmax(x, m=None, axis=-1): """ Softmax with mask (optional) """ x = torch.clamp(x, min=-15.0, max=15.0) if m is not None: m = m.float() x = x * m e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0]) if m is not None: e_x = e_x * m softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-06) return softmax class TimeDistributedDense(torch.nn.Module): """ input: x: batch x time x a mask: batch x time output: y: batch x time x b """ def __init__(self, mlp): super(TimeDistributedDense, self).__init__() 
self.mlp = mlp def forward(self, x, mask=None): x_size = x.size() x = x.view(-1, x_size[-1]) y = self.mlp.forward(x) y = y.view(x_size[:-1] + (y.size(-1),)) if mask is not None: y = y * mask.unsqueeze(-1) return y class AttentionNew(nn.Module): def __init__(self, enc_dim, trg_dim, method='general'): super(AttentionNew, self).__init__() self.method = method if self.method == 'general': self.attn = nn.Linear(enc_dim, trg_dim) elif self.method == 'concat': attn = nn.Linear(enc_dim + trg_dim, trg_dim) v = nn.Linear(trg_dim, 1) self.attn = TimeDistributedDense(mlp=attn) self.v = TimeDistributedDense(mlp=v) self.softmax = nn.Softmax() if self.method == 'dot': self.linear_out = nn.Linear(2 * trg_dim, trg_dim, bias=False) else: self.linear_out = nn.Linear(enc_dim + trg_dim, trg_dim, bias=False) self.tanh = nn.Tanh() def score(self, hiddens, encoder_outputs, encoder_mask=None): """ :param hiddens: (batch, trg_len, trg_hidden_dim) :param encoder_outputs: (batch, src_len, src_hidden_dim) :return: energy score (batch, trg_len, src_len) """ if self.method == 'dot': energies = torch.bmm(hiddens, encoder_outputs.transpose(1, 2)) elif self.method == 'general': energies = self.attn(encoder_outputs) if encoder_mask is not None: energies = energies * encoder_mask.view(encoder_mask.size(0 ), encoder_mask.size(1), 1) energies = torch.bmm(hiddens, energies.transpose(1, 2)) elif self.method == 'concat': energies = [] encoder_outputs.size(0) src_len = encoder_outputs.size(1) for i in range(hiddens.size(1)): hidden_i = hiddens[:, i:i + 1, :].expand(-1, src_len, -1) concated = torch.cat((hidden_i, encoder_outputs), 2) if encoder_mask is not None: concated = concated * encoder_mask.view(encoder_mask. size(0), encoder_mask.size(1), 1) energy = self.tanh(self.attn(concated, encoder_mask)) if encoder_mask is not None: energy = energy * encoder_mask.view(encoder_mask.size(0 ), encoder_mask.size(1), 1) energy = self.v(energy, encoder_mask).squeeze(-1) energies.append(energy) energies = torch.stack(energies, dim=1) if encoder_mask is not None: energies = energies * encoder_mask.view(encoder_mask.size(0 ), 1, encoder_mask.size(1)) return energies.contiguous() def forward_(self, hidden, context): """ Original forward for DotAttention, it doesn't work if the dim of encoder and decoder are not same input and context must be in same dim: return Softmax(hidden.dot([c for c in context])) input: batch x hidden_dim context: batch x source_len x hidden_dim """ target = self.linear_in(hidden).unsqueeze(2) attn = torch.bmm(context, target).squeeze(2) attn = self.softmax(attn) attn3 = attn.view(attn.size(0), 1, attn.size(1)) weighted_context = torch.bmm(attn3, context).squeeze(1) h_tilde = torch.cat((weighted_context, hidden), 1) h_tilde = self.tanh(self.linear_out(h_tilde)) return h_tilde, attn def forward(self, input_0, input_1): primals_3 = self.attn.weight primals_4 = self.attn.bias primals_5 = self.linear_out.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1], output[2]
NoteXYX/ACL2017
Attention
false
14,129
[ "Apache-2.0" ]
119
436f59f2aa0044a9d57c95a2a58b2158cb99738d
https://github.com/NoteXYX/ACL2017/tree/436f59f2aa0044a9d57c95a2a58b2158cb99738d
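A quick, hedged usage sketch for the Attention record above; it is not part of the original repo, assumes the eager Attention class is in scope, and reuses the sizes from get_init_inputs()/get_inputs().
import torch
# Instantiate the default 'general' attention and check output shapes.
attn = Attention(enc_dim=4, trg_dim=4)
hidden = torch.rand(4, 4, 4)           # (batch, trg_len, trg_hidden_dim)
encoder_outputs = torch.rand(4, 4, 4)  # (batch, src_len, src_hidden_dim)
h_tilde, attn_weights, attn_energies = attn(hidden, encoder_outputs)
print(h_tilde.shape, attn_weights.shape, attn_energies.shape)
# expected: torch.Size([4, 4, 4]) torch.Size([4, 4, 4]) torch.Size([4, 4, 4])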
FastRCNNPredictor
import torch import torch.nn.functional as F from torch import nn class FastRCNNPredictor(nn.Module): def __init__(self, in_channels, mid_channels, num_classes): super().__init__() self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, mid_channels) self.cls_score = nn.Linear(mid_channels, num_classes) self.bbox_pred = nn.Linear(mid_channels, num_classes * 4) def forward(self, x): x = x.flatten(start_dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) score = self.cls_score(x) bbox_delta = self.bbox_pred(x) return score, bbox_delta def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'mid_channels': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 return buf4, buf5, primals_1, buf1, buf3, primals_8, primals_6, primals_4 class FastRCNNPredictorNew(nn.Module): def __init__(self, in_channels, mid_channels, num_classes): super().__init__() self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, mid_channels) self.cls_score = nn.Linear(mid_channels, num_classes) self.bbox_pred = nn.Linear(mid_channels, num_classes * 4) def forward(self, input_0): primals_1 = self.fc1.weight primals_3 = self.fc1.bias primals_2 = self.fc2.weight primals_5 = self.fc2.bias primals_4 = self.cls_score.weight primals_7 = self.cls_score.bias primals_8 = self.bbox_pred.weight primals_9 = self.bbox_pred.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
Okery/PyTorch-Simple-MaskRCNN
FastRCNNPredictor
false
14,130
[ "MIT" ]
147
5e57a353f211c7130bfcf1d55cacd80057d81423
https://github.com/Okery/PyTorch-Simple-MaskRCNN/tree/5e57a353f211c7130bfcf1d55cacd80057d81423
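A hedged usage sketch for the FastRCNNPredictor record above; it assumes the eager class is in scope and uses the sample sizes from get_init_inputs()/get_inputs().
import torch
# Feed a batch of pooled RoI feature vectors and check both head outputs.
head = FastRCNNPredictor(in_channels=4, mid_channels=4, num_classes=4)
features = torch.rand(4, 4)            # (num_rois, in_channels)
score, bbox_delta = head(features)
print(score.shape, bbox_delta.shape)   # torch.Size([4, 4]) torch.Size([4, 16])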
StandardNLL
import torch class StandardNLL(torch.nn.modules.loss._Loss): """ Shape: log_prob: batch x time x class y_true: batch x time mask: batch x time output: batch """ def forward(self, log_prob, y_true, mask): mask = mask.float() log_P = torch.gather(log_prob.view(-1, log_prob.size(2)), 1, y_true .contiguous().view(-1, 1)) log_P = log_P.view(y_true.size(0), y_true.size(1)) log_P = log_P * mask sum_log_P = torch.sum(log_P, dim=1) / torch.sum(mask, dim=1) return -sum_log_P def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_neg_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp7 = tmp6.to(tl.float32) tmp9 = tmp7 * tmp8 tmp11 = tmp7 * tmp10 tmp12 = tmp9 + tmp11 tmp14 = tmp7 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp7 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp8 + tmp10 tmp20 = tmp19 + tmp13 tmp21 = tmp20 + tmp16 tmp22 = tmp18 / tmp21 tmp23 = -tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_neg_sum_0[grid(64)](arg2_1, arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class StandardNLLNew(torch.nn.modules.loss._Loss): """ Shape: log_prob: batch x time x class y_true: batch x time mask: batch x time output: batch """ def forward(self, input_0, input_1, input_2): arg1_1 = input_0 arg2_1 = input_1 arg0_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
NoteXYX/ACL2017
StandardNLL
false
14,131
[ "Apache-2.0" ]
119
436f59f2aa0044a9d57c95a2a58b2158cb99738d
https://github.com/NoteXYX/ACL2017/tree/436f59f2aa0044a9d57c95a2a58b2158cb99738d
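A hedged usage sketch for the StandardNLL record above; it follows the shapes documented in the class docstring (batch x time x class log-probs, integer targets, float mask) rather than the dataset's sample tensors, and assumes the class is in scope.
import torch
loss_fn = StandardNLL()
log_prob = torch.log_softmax(torch.rand(2, 3, 5), dim=-1)  # batch x time x class
y_true = torch.randint(0, 5, (2, 3))                       # batch x time
mask = torch.ones(2, 3)                                    # batch x time
loss = loss_fn(log_prob, y_true, mask)
print(loss.shape)                                          # torch.Size([2]), one NLL per sequence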
GraphEncoderDecoderAttentionLayer
import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F class GraphEncoderDecoderAttentionLayer(nn.Module): """ Graph-to-Graph message passing, adapted from https://arxiv.org/abs/1710.10903 """ def __init__(self, in_src_features, in_tgt_features, out_features, dropout, alpha, concat=True): super(GraphEncoderDecoderAttentionLayer, self).__init__() self.dropout = dropout self.in_src_features = in_src_features self.in_tgt_features = in_tgt_features self.out_features = out_features self.alpha = alpha self.concat = concat self.Ws = nn.Parameter(torch.empty(size=(in_src_features, out_features))) self.Wt = nn.Parameter(torch.empty(size=(in_tgt_features, out_features))) nn.init.xavier_uniform_(self.Ws.data, gain=1.414) nn.init.xavier_uniform_(self.Wt.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, ctx, adj): Ws_ctx = torch.bmm(ctx, self.Ws.repeat(ctx.size(0), 1, 1)) Wt_h = torch.bmm(h, self.Wt.repeat(h.size(0), 1, 1)) a_input = self._prepare_attentional_mechanism_input(Ws_ctx, Wt_h) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=2) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, Ws_ctx) h_prime = F.leaky_relu(h_prime) return h_prime def _prepare_attentional_mechanism_input(self, Ws_ctx, Wt_h): Ns = Ws_ctx.size()[1] Nt = Wt_h.size()[1] Ws_ctx_repeated_in_chunks = Ws_ctx.repeat_interleave(Nt, dim=1) Wt_h_repeated_alternating = Wt_h.repeat([1, Ns, 1]) all_combinations_matrix = torch.cat([Ws_ctx_repeated_in_chunks, Wt_h_repeated_alternating], dim=2) return all_combinations_matrix.view(Ws_ctx.size(0), Nt, Ns, 2 * self.out_features) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'in_src_features': 4, 'in_tgt_features': 4, 'out_features': 4, 'dropout': 0.5, 'alpha': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 16 x2 = xindex // 128 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + 16 * x2 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * 
tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_mul_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_out_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = -8999999815811072.0 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_leaky_relu_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (8, 1), (1, 1)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_2, buf0, out=buf1) buf2 = buf0 del buf0 triton_poi_fused_repeat_0[grid(64)](primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_4, buf2, out=buf3) buf4 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf1, buf3, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (64, 1), (1, 1), 0) del buf3 extern_kernels.mm(reinterpret_tensor(buf4, (64, 8), (8, 1), 0), primals_5, out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_2[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_2[grid(64)](primals_6, buf7, 64, XBLOCK =64, num_warps=1, num_stages=1) del primals_6 buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf9 = 
empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(16)](buf7, buf6, buf5, buf8, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused__softmax_leaky_relu_mul_where_4[grid(64)](buf10, buf7, buf6, buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 del buf9 buf11 = buf2 del buf2 extern_kernels.bmm(buf10, buf1, out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_5[grid(64)](buf11, buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf11 return buf13, buf6, buf7, buf10, buf12, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (8, 64), (1, 8), 0 ), reinterpret_tensor(primals_5, (1, 8), (1, 1), 0 ), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0) class GraphEncoderDecoderAttentionLayerNew(nn.Module): """ Graph-to-Graph message passing, adapted from https://arxiv.org/abs/1710.10903 """ def __init__(self, in_src_features, in_tgt_features, out_features, dropout, alpha, concat=True): super(GraphEncoderDecoderAttentionLayerNew, self).__init__() self.dropout = dropout self.in_src_features = in_src_features self.in_tgt_features = in_tgt_features self.out_features = out_features self.alpha = alpha self.concat = concat self.Ws = nn.Parameter(torch.empty(size=(in_src_features, out_features))) self.Wt = nn.Parameter(torch.empty(size=(in_tgt_features, out_features))) nn.init.xavier_uniform_(self.Ws.data, gain=1.414) nn.init.xavier_uniform_(self.Wt.data, gain=1.414) self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def _prepare_attentional_mechanism_input(self, Ws_ctx, Wt_h): Ns = Ws_ctx.size()[1] Nt = Wt_h.size()[1] Ws_ctx_repeated_in_chunks = Ws_ctx.repeat_interleave(Nt, dim=1) Wt_h_repeated_alternating = Wt_h.repeat([1, Ns, 1]) all_combinations_matrix = torch.cat([Ws_ctx_repeated_in_chunks, Wt_h_repeated_alternating], dim=2) return all_combinations_matrix.view(Ws_ctx.size(0), Nt, Ns, 2 * self.out_features) def forward(self, input_0, input_1, input_2): primals_1 = self.Ws primals_3 = self.Wt primals_5 = self.a primals_2 = input_0 primals_4 = input_1 primals_6 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Nmegha2601/activitygraph_transformer
GraphEncoderDecoderAttentionLayer
false
14,132
[ "MIT" ]
63
4e21a4ea12527df470b7586d149fa4168a41307c
https://github.com/Nmegha2601/activitygraph_transformer/tree/4e21a4ea12527df470b7586d149fa4168a41307c
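A hedged usage sketch for the GraphEncoderDecoderAttentionLayer record above; it assumes the eager class is in scope and reuses the sizes from get_init_inputs()/get_inputs().
import torch
layer = GraphEncoderDecoderAttentionLayer(in_src_features=4, in_tgt_features=4,
                                          out_features=4, dropout=0.5, alpha=4)
layer.eval()                           # disable dropout for a deterministic check
h = torch.rand(4, 4, 4)                # target-node features
ctx = torch.rand(4, 4, 4)              # source-node (context) features
adj = torch.ones(4, 4, 4)              # dense cross-graph adjacency
print(layer(h, ctx, adj).shape)        # torch.Size([4, 4, 4])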
h_tanh
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class h_tanh(nn.Module): def __init__(self, inplace=True, h_max=1): super(h_tanh, self).__init__() self.relu = nn.ReLU6(inplace=inplace) self.h_max = h_max def forward(self, x): return self.relu(x + 3) * self.h_max / 3 - self.h_max def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 1.0 tmp8 = tmp6 * tmp7 tmp9 = 0.3333333333333333 tmp10 = tmp8 * tmp9 tmp11 = tmp10 - tmp7 tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class h_tanhNew(nn.Module): def __init__(self, inplace=True, h_max=1): super(h_tanhNew, self).__init__() self.relu = nn.ReLU6(inplace=inplace) self.h_max = h_max def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PINTO0309/micronet
h_tanh
false
14,133
[ "MIT" ]
221
97ff01d0ea9a42f0a3f0a93ac67660df26411f28
https://github.com/PINTO0309/micronet/tree/97ff01d0ea9a42f0a3f0a93ac67660df26411f28
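A hedged usage sketch for the h_tanh record above; it assumes the class is in scope. Since the module is a "hard" tanh built from ReLU6, inputs at -3, 0, and +3 should map to -h_max, 0, and +h_max.
import torch
act = h_tanh()                                   # defaults: inplace=True, h_max=1
x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
print(act(x))                                    # tensor([-1., -1.,  0.,  1.,  1.])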
AllReduceLinear
import torch from torch import Tensor import torch.distributed as dist import torch.nn as nn from torch.nn import Linear class ParallelModule(nn.Module): """Parents of all parallel layer classes""" def __init__(self): super().__init__() self.mp_group = None def allreduce(self, outputs): if self.mp_group is not None and dist.get_world_size(group=self. mp_group) > 1: dist.all_reduce(outputs, group=self.mp_group) return outputs class AllReduceLinear(Linear, ParallelModule): """All-reduce linear layer""" def forward(self, input: 'Tensor') ->Tensor: outputs = input.matmul(self.weight.t()) self.allreduce(outputs) if self.bias is not None: outputs += self.bias return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.distributed as dist import torch.nn as nn from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0) class ParallelModule(nn.Module): """Parents of all parallel layer classes""" def __init__(self): super().__init__() self.mp_group = None def allreduce(self, outputs): if self.mp_group is not None and dist.get_world_size(group=self. mp_group) > 1: dist.all_reduce(outputs, group=self.mp_group) return outputs class AllReduceLinearNew(Linear, ParallelModule): """All-reduce linear layer""" def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Oaklight/parallelformers
AllReduceLinear
false
14,134
[ "Apache-2.0" ]
454
57fc36f81734c29aaf814e092ce13681d3c28ede
https://github.com/Oaklight/parallelformers/tree/57fc36f81734c29aaf814e092ce13681d3c28ede
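A hedged usage sketch for the AllReduceLinear record above; it assumes the class is in scope and a single, non-distributed process, in which case mp_group stays None and the layer should act like a plain nn.Linear.
import torch
layer = AllReduceLinear(in_features=4, out_features=4)
x = torch.rand(4, 4, 4, 4)
print(layer(x).shape)                  # torch.Size([4, 4, 4, 4])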
DenseSAGEConv
import math import torch import torch.nn.functional as F import torch.utils.data from torch.nn import Parameter def uniform(size, tensor): stdv = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-stdv, stdv) class DenseSAGEConv(torch.nn.Module): def __init__(self, in_channels, out_channels, norm=True, norm_embed= True, bias=True): super(DenseSAGEConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.norm = norm self.norm_embed = norm_embed self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) uniform(self.in_channels, self.bias) def forward(self, x, adj): x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj out = torch.matmul(adj, x) if self.norm: out = out / adj.sum(dim=-1, keepdim=True) out = torch.matmul(out, self.weight) if self.bias is not None: out = out + self.bias if self.norm_embed: out = F.normalize(out, p=2, dim=-1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) @triton.jit def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0 ), 
out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_div_sum_0[grid(256)](buf1, primals_2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_clamp_min_linalg_vector_norm_1[grid(64)](buf2, primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_2[grid(256)](buf2, primals_4, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, primals_4, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0) def uniform(size, tensor): stdv = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-stdv, stdv) class DenseSAGEConvNew(torch.nn.Module): def __init__(self, in_channels, out_channels, norm=True, norm_embed= True, bias=True): super(DenseSAGEConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.norm = norm self.norm_embed = norm_embed self.weight = Parameter(torch.Tensor(self.in_channels, out_channels)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) uniform(self.in_channels, self.bias) def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
NunoEdgarGFlowHub/pytorch_geometric
DenseSAGEConv
false
14,135
[ "MIT" ]
62
4a03a7e6484c38805a24a2e7362ef32b7e279036
https://github.com/NunoEdgarGFlowHub/pytorch_geometric/tree/4a03a7e6484c38805a24a2e7362ef32b7e279036
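A hedged usage sketch for the DenseSAGEConv record above on a small dense graph; it assumes the class is in scope. A fully connected adjacency keeps the row-sum normalisation away from division by zero.
import torch
conv = DenseSAGEConv(in_channels=4, out_channels=4)
x = torch.rand(2, 6, 4)                # (batch, num_nodes, in_channels)
adj = torch.ones(2, 6, 6)              # dense adjacency, every row sums to 6
print(conv(x, adj).shape)              # torch.Size([2, 6, 4])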
RPNHead
import torch import torch.nn.functional as F from torch import nn class RPNHead(nn.Module): def __init__(self, in_channels, num_anchors): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.cls_logits = nn.Conv2d(in_channels, num_anchors, 1) self.bbox_pred = nn.Conv2d(in_channels, 4 * num_anchors, 1) for l in self.children(): nn.init.normal_(l.weight, std=0.01) nn.init.constant_(l.bias, 0) def forward(self, x): x = F.relu(self.conv(x)) logits = self.cls_logits(x) bbox_reg = self.bbox_pred(x) return logits, bbox_reg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'num_anchors': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 4, 4), (256, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(1024)](buf5, primals_7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf3, buf5, primals_1, primals_3, primals_4, primals_6, buf1 class RPNHeadNew(nn.Module): def __init__(self, in_channels, num_anchors): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) 
self.cls_logits = nn.Conv2d(in_channels, num_anchors, 1) self.bbox_pred = nn.Conv2d(in_channels, 4 * num_anchors, 1) for l in self.children(): nn.init.normal_(l.weight, std=0.01) nn.init.constant_(l.bias, 0) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.cls_logits.weight primals_5 = self.cls_logits.bias primals_6 = self.bbox_pred.weight primals_7 = self.bbox_pred.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
Okery/PyTorch-Simple-MaskRCNN
RPNHead
false
14,136
[ "MIT" ]
147
5e57a353f211c7130bfcf1d55cacd80057d81423
https://github.com/Okery/PyTorch-Simple-MaskRCNN/tree/5e57a353f211c7130bfcf1d55cacd80057d81423
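A hedged usage sketch for the RPNHead record above; it assumes the class is in scope and reuses the sizes from get_init_inputs()/get_inputs().
import torch
head = RPNHead(in_channels=4, num_anchors=4)
feature_map = torch.rand(4, 4, 4, 4)   # (N, C, H, W)
logits, bbox_reg = head(feature_map)
print(logits.shape, bbox_reg.shape)    # torch.Size([4, 4, 4, 4]) torch.Size([4, 16, 4, 4])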
UnfoldTemporalWindows
import torch import torch.nn as nn class UnfoldTemporalWindows(nn.Module): def __init__(self, window_size, window_stride, window_dilation=1): super().__init__() self.window_size = window_size self.window_stride = window_stride self.window_dilation = window_dilation self.padding = (window_size + (window_size - 1) * (window_dilation - 1) - 1) // 2 self.unfold = nn.Unfold(kernel_size=(self.window_size, 1), dilation =(self.window_dilation, 1), stride=(self.window_stride, 1), padding=(self.padding, 0)) def forward(self, x): N, C, _T, V = x.shape x = self.unfold(x) x = x.view(N, C, self.window_size, -1, V).permute(0, 1, 3, 2, 4 ).contiguous() x = x.view(N, C, -1, self.window_size * V) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'window_size': 4, 'window_stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x2 = xindex // 16 % 3 x3 = xindex // 48 x4 = xindex % 16 x5 = xindex tmp0 = -1 + x1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x4 + 4 * x2 + 16 * x3), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x5, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 4, 4), (192, 48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(768)](arg0_1, buf0, 768, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 3, 16), (192, 48, 16, 1), 0), class UnfoldTemporalWindowsNew(nn.Module): def __init__(self, window_size, window_stride, window_dilation=1): super().__init__() self.window_size = window_size self.window_stride = window_stride self.window_dilation = window_dilation self.padding = (window_size + (window_size - 1) * (window_dilation - 1) - 1) // 2 self.unfold = nn.Unfold(kernel_size=(self.window_size, 1), dilation =(self.window_dilation, 1), stride=(self.window_stride, 1), padding=(self.padding, 0)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PINTO0309/MS-G3D
UnfoldTemporalWindows
false
14,137
[ "MIT" ]
343
5f0f7740ed8543bd0e288affca2a76541c83669e
https://github.com/PINTO0309/MS-G3D/tree/5f0f7740ed8543bd0e288affca2a76541c83669e
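A hedged usage sketch for the UnfoldTemporalWindows record above; it assumes the class is in scope. With window_size=4, stride=1, dilation=1 an (N, C, T, V) tensor becomes (N, C, T_out, window_size * V) sliding windows, matching the (4, 4, 3, 16) output of the compiled version.
import torch
unfold = UnfoldTemporalWindows(window_size=4, window_stride=1)
x = torch.rand(4, 4, 4, 4)             # (N, C, T, V)
print(unfold(x).shape)                 # torch.Size([4, 4, 3, 16])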
SSP
import torch import numpy as np from torch import nn import torch.nn.functional as F def ssp(*args, **kwargs): return F.softplus(*args, **kwargs) - np.log(2) class SSP(nn.Softplus): def forward(self, xs): return ssp(xs, self.beta, self.threshold) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp6 * tmp1 tmp8 = tl.where(tmp4, tmp0, tmp7) tmp9 = 0.6931471805599453 tmp10 = tmp8 - tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def ssp(*args, **kwargs): return F.softplus(*args, **kwargs) - np.log(2) class SSPNew(nn.Softplus): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PKUfjh/deepqmc
SSP
false
14,138
[ "MIT" ]
224
2a948ce712dd4e40568aa35931527e6c874eba73
https://github.com/PKUfjh/deepqmc/tree/2a948ce712dd4e40568aa35931527e6c874eba73
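A hedged usage sketch for the SSP record above; it assumes the class is in scope. SSP is softplus shifted down by log(2), so it passes (approximately) through zero at the origin.
import torch
act = SSP()
print(act(torch.zeros(3)))                 # ~tensor([0., 0., 0.]) up to float rounding
print(act(torch.rand(4, 4, 4, 4)).shape)   # torch.Size([4, 4, 4, 4])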
SmallMotionEncoder
import torch import torch.nn as nn import torch.nn.functional as F class SmallMotionEncoder(nn.Module): """ Encodes motion features from the correlation levels of the pyramid and the input flow estimate using convolution layers. Parameters ---------- corr_radius : int Correlation radius of the correlation pyramid corr_levels : int Correlation levels of the correlation pyramid """ def __init__(self, corr_radius, corr_levels): super(SmallMotionEncoder, self).__init__() cor_planes = corr_levels * (2 * corr_radius + 1) ** 2 self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0) self.convf1 = nn.Conv2d(2, 64, 7, padding=3) self.convf2 = nn.Conv2d(64, 32, 3, padding=1) self.conv = nn.Conv2d(128, 80, 3, padding=1) def forward(self, flow, corr): """ Parameters ---------- flow : torch.Tensor A tensor of shape N x 2 x H x W corr : torch.Tensor A tensor of shape N x (corr_levels * (2 * corr_radius + 1) ** 2) x H x W Returns ------- torch.Tensor A tensor of shape N x 82 x H x W """ cor = F.relu(self.convc1(corr)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo = torch.cat([cor, flo], dim=1) out = F.relu(self.conv(cor_flo)) return torch.cat([out, flow], dim=1) def get_inputs(): return [torch.rand([4, 2, 64, 64]), torch.rand([4, 324, 64, 64])] def get_init_inputs(): return [[], {'corr_radius': 4, 'corr_levels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 128 x0 = xindex % 4096 x2 = xindex // 524288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 96, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 393216 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + (-96 + x1), tmp12, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 82 x0 = xindex % 4096 x2 = xindex // 335872 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 80, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 327680 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 82, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-80 + x1) + 8192 * x2), tmp12, other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 80 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) 
@triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 96 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (96, 324, 1, 1), (324, 1, 1, 1)) assert_size_stride(primals_2, (96,), (1,)) assert_size_stride(primals_3, (4, 324, 64, 64), (1327104, 4096, 64, 1)) assert_size_stride(primals_4, (64, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 2, 64, 64), (8192, 4096, 64, 1)) assert_size_stride(primals_7, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_8, (32,), (1,)) assert_size_stride(primals_9, (80, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_10, (80,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 64, 64), (393216, 4096, 64, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf2, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(2097152)](buf0, primals_2, buf3, primals_8, buf4, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 80, 64, 64), (327680, 4096, 64, 1)) buf6 = empty_strided_cuda((4, 82, 64, 64), (335872, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(1343488)](buf5, primals_10, primals_6, buf6, 1343488, XBLOCK=1024, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 80, 64, 64), (327680, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(1310720)]( buf5, primals_10, buf7, 1310720, XBLOCK=1024, num_warps=4, 
num_stages=1) del buf5 del primals_10 buf8 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(524288)]( buf3, primals_8, buf8, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf3 del primals_8 buf9 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_5[grid(1572864)]( buf0, primals_2, buf9, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf6, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf2, buf4, buf7, buf8, buf9) class SmallMotionEncoderNew(nn.Module): """ Encodes motion features from the correlation levels of the pyramid and the input flow estimate using convolution layers. Parameters ---------- corr_radius : int Correlation radius of the correlation pyramid corr_levels : int Correlation levels of the correlation pyramid """ def __init__(self, corr_radius, corr_levels): super(SmallMotionEncoderNew, self).__init__() cor_planes = corr_levels * (2 * corr_radius + 1) ** 2 self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0) self.convf1 = nn.Conv2d(2, 64, 7, padding=3) self.convf2 = nn.Conv2d(64, 32, 3, padding=1) self.conv = nn.Conv2d(128, 80, 3, padding=1) def forward(self, input_0, input_1): primals_1 = self.convc1.weight primals_2 = self.convc1.bias primals_4 = self.convf1.weight primals_5 = self.convf1.bias primals_7 = self.convf2.weight primals_8 = self.convf2.bias primals_9 = self.conv.weight primals_10 = self.conv.bias primals_6 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
NeelayS/ezflow
SmallMotionEncoder
false
14,139
[ "MIT" ]
94
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
https://github.com/NeelayS/ezflow/tree/b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
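A minimal usage sketch for the SmallMotionEncoderNew wrapper above; the input shapes are read off the assert_size_stride guards in call() (flow: 4x2x64x64, correlation: 4x324x64x64 for corr_radius=4, corr_levels=4), and a CUDA device is assumed because the generated kernels are launched on device 0.

import torch

if torch.cuda.is_available():
    enc = SmallMotionEncoderNew(corr_radius=4, corr_levels=4).cuda()
    flow = torch.rand(4, 2, 64, 64, device='cuda')    # input_0: flow estimate
    corr = torch.rand(4, 324, 64, 64, device='cuda')  # input_1: 4 * (2*4 + 1)**2 correlation planes
    out = enc(flow, corr)
    print(out.shape)  # torch.Size([4, 82, 64, 64]): 80 conv channels concatenated with the 2 flow channels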
ElectronicAsymptotic
import torch from torch import nn class ElectronicAsymptotic(nn.Module): """Jastrow factor with a correct electronic cusp. The Jastrow factor is calculated from distances between all pairs of electrons, :math:`d_{ij}`, .. math:: \\mathrm \\gamma :=\\sum_{ij}-\\frac{c}{\\alpha(1+\\alpha d_{ij})} Args: cusp (float): *c*, target cusp value alpha (float): :math:`\\alpha`, rate of decay of the cusp function to 1. Shape: - Input, :math:`d_{ij}`: :math:`(*,N_\\text{pair})` - Output, :math:`\\gamma`: :math:`(*)` """ def __init__(self, *, cusp, alpha=1.0): super().__init__() self.cusp = cusp self.alpha = alpha def forward(self, dists): return -(self.cusp / (self.alpha * (1 + self.alpha * dists))).sum(dim =-1) def extra_repr(self): return f'cusp={self.cusp}, alpha={self.alpha}' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cusp': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_neg_reciprocal_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 * tmp1 tmp5 = tl.full([1], 1, tl.int32) tmp6 = tmp5 / tmp4 tmp7 = 4.0 tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp10 + tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp5 / tmp12 tmp14 = tmp13 * tmp7 tmp15 = tmp8 + tmp14 tmp17 = tmp16 * tmp1 tmp18 = tmp17 + tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp5 / tmp19 tmp21 = tmp20 * tmp7 tmp22 = tmp15 + tmp21 tmp24 = tmp23 * tmp1 tmp25 = tmp24 + tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp5 / tmp26 tmp28 = tmp27 * tmp7 tmp29 = tmp22 + tmp28 tmp30 = -tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_neg_reciprocal_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ElectronicAsymptoticNew(nn.Module): """Jastrow factor with a correct electronic cusp. The Jastrow factor is calculated from distances between all pairs of electrons, :math:`d_{ij}`, .. math:: \\mathrm \\gamma :=\\sum_{ij}-\\frac{c}{\\alpha(1+\\alpha d_{ij})} Args: cusp (float): *c*, target cusp value alpha (float): :math:`\\alpha`, rate of decay of the cusp function to 1. Shape: - Input, :math:`d_{ij}`: :math:`(*,N_\\text{pair})` - Output, :math:`\\gamma`: :math:`(*)` """ def __init__(self, *, cusp, alpha=1.0): super().__init__() self.cusp = cusp self.alpha = alpha def extra_repr(self): return f'cusp={self.cusp}, alpha={self.alpha}' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PKUfjh/deepqmc
ElectronicAsymptotic
false
14,140
[ "MIT" ]
224
2a948ce712dd4e40568aa35931527e6c874eba73
https://github.com/PKUfjh/deepqmc/tree/2a948ce712dd4e40568aa35931527e6c874eba73
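A quick numerical check of the cusp term above. The generated kernel bakes in the example constants cusp=4 and alpha=1 from get_init_inputs, so the comparison below only holds for those values; CUDA is assumed because call() pins device 0.

import torch

if torch.cuda.is_available():
    dists = torch.rand(4, 4, 4, 4, device='cuda')
    cusp, alpha = 4.0, 1.0                          # the constants hard-coded in the fused kernel
    eager = -(cusp / (alpha * (1 + alpha * dists))).sum(dim=-1)
    fused = ElectronicAsymptoticNew(cusp=cusp, alpha=alpha)(dists)
    print(torch.allclose(eager, fused, atol=1e-6))  # True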
LossEnergy
import torch from torch import nn class WaveFunctionLoss(nn.Module): """Base class for all wave function loss functions. Any such loss must be derived from the local energy and wave function values, :math:`L(\\{E_\\text{loc}[\\psi],\\ln|\\psi|,w\\})`, using also importance-sampling weights *w*. Shape: - Input1, :math:`E_\\text{loc}[\\psi](\\mathbf r)`: :math:`(*)` - Input2, :math:`\\ln|\\psi(\\mathbf r)|`: :math:`(*)` - Input3, :math:`w(\\mathbf r)`: :math:`(*)` - Output, *L*: :math:`()` """ pass class LossEnergy(WaveFunctionLoss): """Total energy loss function. .. math:: L:=2\\mathbb E\\big[(E_\\text{loc}-\\mathbb E[E_\\text{loc}])\\ln|\\psi|\\big] Taking a derivative of only the logarithm, the resulting gradient is equivalent, thanks to the Hermitian property of the Hamiltonian, to the gradient of the plain total energy loss function, :math:`\\mathbb E[E_\\text{loc}]`. """ def forward(self, Es_loc, log_psis, ws): assert Es_loc.grad_fn is None self.weights = 2 * (Es_loc - (ws * Es_loc).mean()) / len(Es_loc) return (self.weights * ws * log_psis).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_mul_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp14 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tmp8 = tmp1 - tmp7 tmp9 = 2.0 tmp10 = tmp8 * tmp9 tmp11 = 0.25 tmp12 = tmp10 * tmp11 tmp13 = tmp12 * tmp0 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp12, None) tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_div_mean_mul_sub_sum_0[grid(1)](arg1_1, arg0_1, arg2_1, buf1, buf2, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, buf1 class WaveFunctionLoss(nn.Module): """Base class for all wave function loss functions. Any such loss must be derived from the local energy and wave function values, :math:`L(\\{E_\\text{loc}[\\psi],\\ln|\\psi|,w\\})`, using also importance-sampling weights *w*. Shape: - Input1, :math:`E_\\text{loc}[\\psi](\\mathbf r)`: :math:`(*)` - Input2, :math:`\\ln|\\psi(\\mathbf r)|`: :math:`(*)` - Input3, :math:`w(\\mathbf r)`: :math:`(*)` - Output, *L*: :math:`()` """ pass class LossEnergyNew(WaveFunctionLoss): """Total energy loss function. .. math:: L:=2\\mathbb E\\big[(E_\\text{loc}-\\mathbb E[E_\\text{loc}])\\ln|\\psi|\\big] Taking a derivative of only the logarithm, the resulting gradient is equivalent, thanks to the Hermitian property of the Hamiltonian, to the gradient of the plain total energy loss function, :math:`\\mathbb E[E_\\text{loc}]`. """ def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
PKUfjh/deepqmc
LossEnergy
false
14,141
[ "MIT" ]
224
2a948ce712dd4e40568aa35931527e6c874eba73
https://github.com/PKUfjh/deepqmc/tree/2a948ce712dd4e40568aa35931527e6c874eba73
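A tiny eager illustration of the loss formula above, spelled out with hand-checkable numbers; the uniform importance weights are an assumption made only for this example.

import torch

Es_loc   = torch.tensor([1.0, 2.0, 3.0, 4.0])   # local energies
log_psis = torch.tensor([0.1, 0.2, 0.3, 0.4])   # ln|psi| values
ws       = torch.ones(4)                        # importance-sampling weights
weights  = 2 * (Es_loc - (ws * Es_loc).mean()) / len(Es_loc)
loss     = (weights * ws * log_psis).sum()
print(loss)  # tensor(0.2500): the mean energy is 2.5, so the deviations are (-1.5, -0.5, 0.5, 1.5)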
MotionEncoder
import torch import torch.nn as nn import torch.nn.functional as F class MotionEncoder(nn.Module): """ Encodes motion features from the correlation levels of the pyramid and the input flow estimate using convolution layers. Parameters ---------- corr_radius : int Correlation radius of the correlation pyramid corr_levels : int Correlation levels of the correlation pyramid """ def __init__(self, corr_radius, corr_levels): super(MotionEncoder, self).__init__() cor_planes = corr_levels * (2 * corr_radius + 1) ** 2 self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = nn.Conv2d(256, 192, 3, padding=1) self.convf1 = nn.Conv2d(2, 128, 7, padding=3) self.convf2 = nn.Conv2d(128, 64, 3, padding=1) self.conv = nn.Conv2d(64 + 192, 128 - 2, 3, padding=1) def forward(self, flow, corr): """ Parameters ---------- flow : torch.Tensor A tensor of shape N x 2 x H x W corr : torch.Tensor A tensor of shape N x (corr_levels * (2 * corr_radius + 1) ** 2) x H x W Returns ------- torch.Tensor A tensor of shape N x 128 x H x W """ cor = F.relu(self.convc1(corr)) cor = F.relu(self.convc2(cor)) flo = F.relu(self.convf1(flow)) flo = F.relu(self.convf2(flo)) cor_flo = torch.cat([cor, flo], dim=1) out = F.relu(self.conv(cor_flo)) return torch.cat([out, flow], dim=1) def get_inputs(): return [torch.rand([4, 2, 64, 64]), torch.rand([4, 324, 64, 64])] def get_init_inputs(): return [[], {'corr_radius': 4, 'corr_levels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 256 x0 = xindex % 4096 x2 = xindex // 1048576 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 192, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 786432 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-192 + x1) + 262144 * x2), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + (-192 + x1), tmp12, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 128 x0 = xindex % 4096 x2 = xindex // 524288 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 126, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 516096 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-126 + x1) + 8192 * x2), tmp12, other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def 
triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 126 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (256, 324, 1, 1), (324, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 324, 64, 64), (1327104, 4096, 64, 1)) assert_size_stride(primals_4, (192, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (192,), (1,)) assert_size_stride(primals_6, (128, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (4, 2, 64, 64), (8192, 4096, 64, 1)) assert_size_stride(primals_9, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_10, (64,), (1,)) assert_size_stride(primals_11, (126, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (126,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4194304)](buf1, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 192, 64, 64), (786432, 4096, 64, 1)) buf3 = extern_kernels.convolution(primals_8, primals_6, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(2097152)](buf4, primals_7, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf5 = 
extern_kernels.convolution(buf4, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(4194304)](buf2, primals_5, buf5, primals_10, buf6, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 126, 64, 64), (516096, 4096, 64, 1)) buf8 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(2097152)](buf7, primals_12, primals_8, buf8, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 126, 64, 64), (516096, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(2064384)]( buf7, primals_12, buf9, 2064384, XBLOCK=512, num_warps=8, num_stages=1) del buf7 del primals_12 buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_5[grid(1048576)]( buf5, primals_10, buf10, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf5 del primals_10 buf11 = empty_strided_cuda((4, 192, 64, 64), (786432, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(3145728)]( buf2, primals_5, buf11, 3145728, XBLOCK=1024, num_warps=4, num_stages=1) del buf2 del primals_5 return (buf8, primals_1, primals_3, primals_4, primals_6, primals_8, primals_9, primals_11, buf1, buf4, buf6, buf9, buf10, buf11) class MotionEncoderNew(nn.Module): """ Encodes motion features from the correlation levels of the pyramid and the input flow estimate using convolution layers. Parameters ---------- corr_radius : int Correlation radius of the correlation pyramid corr_levels : int Correlation levels of the correlation pyramid """ def __init__(self, corr_radius, corr_levels): super(MotionEncoderNew, self).__init__() cor_planes = corr_levels * (2 * corr_radius + 1) ** 2 self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0) self.convc2 = nn.Conv2d(256, 192, 3, padding=1) self.convf1 = nn.Conv2d(2, 128, 7, padding=3) self.convf2 = nn.Conv2d(128, 64, 3, padding=1) self.conv = nn.Conv2d(64 + 192, 128 - 2, 3, padding=1) def forward(self, input_0, input_1): primals_1 = self.convc1.weight primals_2 = self.convc1.bias primals_4 = self.convc2.weight primals_5 = self.convc2.bias primals_6 = self.convf1.weight primals_7 = self.convf1.bias primals_9 = self.convf2.weight primals_10 = self.convf2.bias primals_11 = self.conv.weight primals_12 = self.conv.bias primals_8 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
NeelayS/ezflow
MotionEncoder
false
14,142
[ "MIT" ]
94
b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
https://github.com/NeelayS/ezflow/tree/b93a48c4adf5021f7eacbfc43220c7efa5ae55cd
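A minimal usage sketch for MotionEncoderNew above, with shapes taken from the record's get_inputs/get_init_inputs and a CUDA device assumed by the generated call().

import torch

if torch.cuda.is_available():
    enc = MotionEncoderNew(corr_radius=4, corr_levels=4).cuda()
    flow = torch.rand(4, 2, 64, 64, device='cuda')
    corr = torch.rand(4, 324, 64, 64, device='cuda')  # 4 * (2*4 + 1)**2 correlation planes
    out = enc(flow, corr)
    print(out.shape)  # torch.Size([4, 128, 64, 64]): 126 conv channels plus the 2 flow channels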
ResidualAttention
import torch from torch import nn class ResidualAttention(nn.Module): def __init__(self, channel=512, num_class=1000, la=0.2): super().__init__() self.la = la self.fc = nn.Conv2d(in_channels=channel, out_channels=num_class, kernel_size=1, stride=1, bias=False) def forward(self, x): _b, _c, _h, _w = x.shape y_raw = self.fc(x).flatten(2) y_avg = torch.mean(y_raw, dim=2) y_max = torch.max(y_raw, dim=2)[0] score = y_avg + self.la * y_max return score def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_red_fused_max_mean_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128000 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 1000 x1 = xindex // 1000 _tmp2 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex _tmp4 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 1000 * r2 + 128000 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = _tmp2 + tmp1 _tmp2 = tl.where(rmask & xmask, tmp3, _tmp2) tmp5 = triton_helpers.maximum(_tmp4, tmp1) _tmp4 = tl.where(rmask & xmask, tmp5, _tmp4) tmp2 = tl.sum(_tmp2, 1)[:, None] tl.store(out_ptr0 + x3, tmp2, xmask) tmp4 = triton_helpers.max2(_tmp4, 1)[:, None] tl.store(out_ptr1 + x3, tmp4, xmask) @triton.jit def triton_per_fused_add_max_mean_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4000 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 1000 x1 = xindex // 1000 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1000 * r2 + 32000 * x1), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (x0 + 1000 * r2 + 32000 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, float('-inf')) tmp9 = triton_helpers.max2(tmp8, 1)[:, None] tmp10 = 4096.0 tmp11 = tmp4 / tmp10 tmp12 = 0.2 tmp13 = tmp9 * tmp12 tmp14 = tmp11 + tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_red_fused_max_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): xnumel = 4000 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 1000 x1 = xindex // 1000 _tmp2 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) _tmp2_index = tl.full([XBLOCK, RBLOCK], 9223372036854775807, tl.int64) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 1000 * r2 + 4096000 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) _tmp2_next, _tmp2_index_next = triton_helpers.maximum_with_index(_tmp2, _tmp2_index, tmp1, rindex) _tmp2 = tl.where(rmask & xmask, _tmp2_next, _tmp2) _tmp2_index = tl.where(rmask & xmask, _tmp2_index_next, _tmp2_index) _, tmp2_tmp = triton_helpers.max_with_index(_tmp2, _tmp2_index, 1) tmp2 = tmp2_tmp[:, None] tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (1000, 512, 1, 1), (512, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_1, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1000, 64, 64), (4096000, 1, 64000, 1000)) buf2 = empty_strided_cuda((4, 1000, 32), (32000, 1, 1000), torch. float32) buf4 = empty_strided_cuda((4, 1000, 32), (32000, 1, 1000), torch. float32) triton_red_fused_max_mean_1[grid(128000)](buf1, buf2, buf4, 128000, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32) buf7 = buf3 del buf3 triton_per_fused_add_max_mean_mul_2[grid(4000)](buf7, buf2, buf4, 4000, 32, XBLOCK=128, num_warps=8, num_stages=1) del buf2 del buf4 buf6 = empty_strided_cuda((4, 1000), (1000, 1), torch.int64) triton_red_fused_max_3[grid(4000)](buf1, buf6, 4000, 4096, XBLOCK=8, RBLOCK=512, num_warps=16, num_stages=1) del buf1 return buf7, buf0, primals_2, reinterpret_tensor(buf6, (4, 1000, 1), ( 1000, 1, 1), 0) class ResidualAttentionNew(nn.Module): def __init__(self, channel=512, num_class=1000, la=0.2): super().__init__() self.la = la self.fc = nn.Conv2d(in_channels=channel, out_channels=num_class, kernel_size=1, stride=1, bias=False) def forward(self, input_0): primals_2 = self.fc.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Nitin-Mane/External-Attention-pytorch
ResidualAttention
false
14,143
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
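A minimal usage sketch for ResidualAttentionNew above: the per-class score is the spatially averaged logit plus la times the spatial maximum (the fused kernel hard-codes la=0.2). Sizes follow the record's defaults and get_inputs; CUDA is assumed.

import torch

if torch.cuda.is_available():
    head = ResidualAttentionNew(channel=512, num_class=1000, la=0.2).cuda()
    feats = torch.rand(4, 512, 64, 64, device='cuda')
    scores = head(feats)
    print(scores.shape)  # torch.Size([4, 1000]): y_avg + 0.2 * y_max for each class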
SLP
import torch import torch.nn.functional as F import torch.utils.data.distributed import torch import torch.nn as nn class SLP(nn.Module): def __init__(self, input_size, logits): super(SLP, self).__init__() self._input_size = input_size self.fc = nn.Linear(input_size, logits) def forward(self, x): x = x.view(-1, self._input_size) x = self.fc(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'logits': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data.distributed import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2 class SLPNew(nn.Module): def __init__(self, input_size, logits): super(SLPNew, self).__init__() self._input_size = input_size self.fc = nn.Linear(input_size, logits) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Pandinosaurus/KungFu
SLP
false
14,144
[ "Apache-2.0" ]
291
80dfa463450330e920b413f65cc49d8e013b84a9
https://github.com/Pandinosaurus/KungFu/tree/80dfa463450330e920b413f65cc49d8e013b84a9
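A minimal usage sketch for SLPNew above; the input is flattened to (-1, input_size) before the linear layer, so the 4x4x4x4 example collapses to 64 rows of log-probabilities (sizes follow the record's get_inputs/get_init_inputs, CUDA assumed).

import torch

if torch.cuda.is_available():
    model = SLPNew(input_size=4, logits=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    log_probs = model(x)
    print(log_probs.shape)              # torch.Size([64, 4])
    print(log_probs.exp().sum(dim=1))   # each row sums to ~1, as expected for log_softmax output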
Biaffine
import torch
import torch.utils.data.dataloader
import torch.nn


class Biaffine(torch.nn.Module):

    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        """
        :param n_in: size of input
        :param n_out: number of channels
        :param bias_x: set bias for x
        :param bias_y: set bias for y
        """
        super(Biaffine, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.bias_x = bias_x
        self.bias_y = bias_y
        self.weight = torch.nn.Parameter(torch.Tensor(n_out, n_in + bias_x,
            n_in + bias_y))
        self.reset_parameters()

    def extra_repr(self):
        st = 'n_in:{}, n_out:{}, bias_x:{}, bias_y:{}'.format(self.n_in,
            self.n_out, self.bias_x, self.bias_y)
        return st

    def reset_parameters(self):
        torch.nn.init.zeros_(self.weight)

    def forward(self, x, y):
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        s = s.squeeze(1)
        return s


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = 1.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3) del buf1 return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0 ), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0) class BiaffineNew(torch.nn.Module): def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True): """ :param n_in: size of input :param n_out: number of channels :param bias_x: set bias for x :param bias_x: set bias for y """ super(BiaffineNew, self).__init__() self.n_in = n_in self.n_out = n_out self.bias_x = bias_x self.bias_y = bias_y self.weight = torch.nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in + bias_y)) self.reset_parameters() def extra_repr(self): st = 'n_in:{}, n_out:{}, bias_x:{}, bias_x:{}'.format(self.n_in, self.n_out, self.bias_x, self.bias_y) return st def reset_parameters(self): torch.nn.init.zeros_(self.weight) def forward(self, input_0, input_1): primals_3 = self.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
ParikhKadam/flair
Biaffine
false
14,145
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
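A minimal usage sketch for the biaffine scorer above (sizes follow the record's get_inputs; CUDA assumed). Because reset_parameters zero-initialises the weight, the scores are exactly zero until the parameter is trained.

import torch

if torch.cuda.is_available():
    scorer = BiaffineNew(n_in=4).cuda()        # n_out=1, bias_x=True, bias_y=True
    x = torch.rand(4, 4, 4, device='cuda')     # (batch, len_x, n_in)
    y = torch.rand(4, 4, 4, device='cuda')     # (batch, len_y, n_in)
    s = scorer(x, y)
    print(s.shape)        # torch.Size([4, 4, 4]): the single output channel is squeezed away
    print(s.abs().max())  # tensor(0., device='cuda:0') with the zero-initialised weight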
PairwiseBCELoss
import torch from abc import abstractmethod import torch.utils.data.dataloader import torch.nn.functional as F from torch import nn import torch.nn class SimilarityLoss(nn.Module): def __init__(self): super(SimilarityLoss, self).__init__() @abstractmethod def forward(self, inputs, targets): pass class PairwiseBCELoss(SimilarityLoss): """ Binary cross entropy between pair similarities and pair labels. """ def __init__(self, balanced=False): super(PairwiseBCELoss, self).__init__() self.balanced = balanced def forward(self, inputs, targets): n = inputs.shape[0] neg_targets = torch.ones_like(targets) - targets bce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none') if self.balanced: weight_matrix = n * (targets / 2.0 + neg_targets / (2.0 * (n - 1))) bce_loss *= weight_matrix loss = bce_loss.mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from abc import abstractmethod import torch.utils.data.dataloader from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SimilarityLoss(nn.Module): def __init__(self): super(SimilarityLoss, self).__init__() @abstractmethod def forward(self, inputs, targets): pass class PairwiseBCELossNew(SimilarityLoss): """ Binary cross entropy between pair similarities and pair labels. """ def __init__(self, balanced=False): super(PairwiseBCELossNew, self).__init__() self.balanced = balanced def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ParikhKadam/flair
PairwiseBCELoss
false
14,146
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
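A short eager check of the unbalanced case above: with balanced=False the loss is just the mean of the elementwise BCE-with-logits terms, so it can be compared directly against torch.nn.functional. This uses the eager PairwiseBCELoss class from the record and runs on CPU.

import torch
import torch.nn.functional as F

inputs  = torch.rand(4, 4, 4, 4)   # pair similarity logits
targets = torch.rand(4, 4, 4, 4)   # soft pair labels in [0, 1]
eager  = F.binary_cross_entropy_with_logits(inputs, targets, reduction='mean')
module = PairwiseBCELoss(balanced=False)(inputs, targets)
print(torch.allclose(eager, module))  # True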
NegativeScaledDotProduct
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProduct(torch.nn.Module): def forward(self, a, b): sqrt_d = torch.sqrt(torch.tensor(a.size(-1))) return -dot_product(a, b, normalize=False) / sqrt_d def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_neg_sqrt_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = -tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_neg_sqrt_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class NegativeScaledDotProductNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ParikhKadam/flair
NegativeScaledDotProduct
false
14,147
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
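A worked example of the similarity above, using the eager class from the record: for 4-dimensional vectors the pairwise dot products are divided by sqrt(4) = 2 and negated, so identity inputs give -0.5 on the diagonal.

import torch

a = torch.eye(4)
b = torch.eye(4)
print(NegativeScaledDotProduct()(a, b))
# diagonal entries are -1 / sqrt(4) = -0.5, off-diagonal entries are 0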
CAModel
import torch import torch.nn as nn import torch.nn.functional as F class CAModel(nn.Module): def __init__(self, env_d): super(CAModel, self).__init__() self.conv1 = nn.Conv2d(env_d * 3, 232, 1) self.conv2 = nn.Conv2d(232, env_d, 1) nn.init.zeros_(self.conv2.weight) nn.init.zeros_(self.conv2.bias) def forward(self, x): x = F.relu(self.conv1(x)) return self.conv2(x) def get_inputs(): return [torch.rand([4, 12, 64, 64])] def get_init_inputs(): return [[], {'env_d': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 48 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 12 y1 = yindex // 12 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 12 * x2 + 49152 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 232 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (232, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_2, (232,), (1,)) assert_size_stride(primals_3, (4, 12, 64, 64), (49152, 4096, 64, 1)) assert_size_stride(primals_4, (4, 232, 1, 1), (232, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 64, 64), (49152, 1, 768, 12), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(48, 4096)](primals_3, buf0, 48, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 232, 64, 64), (950272, 1, 14848, 232)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(3801088)](buf2, primals_2, 3801088, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 64, 64), (16384, 1, 256, 4)) buf4 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_2[grid(16, 4096)](buf3, primals_5, 
buf4, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, primals_1, buf0, primals_4, buf2 class CAModelNew(nn.Module): def __init__(self, env_d): super(CAModelNew, self).__init__() self.conv1 = nn.Conv2d(env_d * 3, 232, 1) self.conv2 = nn.Conv2d(232, env_d, 1) nn.init.zeros_(self.conv2.weight) nn.init.zeros_(self.conv2.bias) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
PWhiddy/Growing-Neural-Cellular-Automata-Pytorch
CAModel
false
14,148
[ "Apache-2.0" ]
47
73a68e9a9cd0c3c14e590238f098937dc0f5c888
https://github.com/PWhiddy/Growing-Neural-Cellular-Automata-Pytorch/tree/73a68e9a9cd0c3c14e590238f098937dc0f5c888
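A minimal usage sketch for CAModelNew above: the perception input carries env_d * 3 channels and the update head returns env_d channels; since conv2 is zero-initialised, the update is exactly zero before training (sizes follow the record's get_inputs/get_init_inputs, CUDA assumed).

import torch

if torch.cuda.is_available():
    model = CAModelNew(env_d=4).cuda()
    perception = torch.rand(4, 12, 64, 64, device='cuda')  # env_d * 3 = 12 channels
    update = model(perception)
    print(update.shape)        # torch.Size([4, 4, 64, 64])
    print(update.abs().max())  # tensor(0., device='cuda:0') thanks to the zero-initialised conv2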
ConvertPointsFromHomogeneous
import torch
import torch.nn as nn


def convert_points_from_homogeneous(points):
    """Function that converts points from homogeneous to Euclidean space.

    See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.

    Examples::

        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> output = tgm.convert_points_from_homogeneous(input)  # BxNx2
    """
    if not torch.is_tensor(points):
        raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
            type(points)))
    if len(points.shape) < 2:
        raise ValueError('Input must be at least a 2D tensor. Got {}'.
            format(points.shape))
    return points[..., :-1] / points[..., -1:]


class ConvertPointsFromHomogeneous(nn.Module):
    """Creates a transformation that converts points from homogeneous to
    Euclidean space.

    Args:
        points (Tensor): tensor of N-dimensional points.

    Returns:
        Tensor: tensor of N-1-dimensional points.

    Shape:
        - Input: :math:`(B, D, N)` or :math:`(D, N)`
        - Output: :math:`(B, D, N - 1)` or :math:`(D, N - 1)`

    Examples::

        >>> input = torch.rand(2, 4, 3)  # BxNx3
        >>> transform = tgm.ConvertPointsFromHomogeneous()
        >>> output = transform(input)  # BxNx2
    """

    def __init__(self):
        super(ConvertPointsFromHomogeneous, self).__init__()

    def forward(self, input):
        return convert_points_from_homogeneous(input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def convert_points_from_homogeneous(points): """Function that converts points from homogeneous to Euclidean space. See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = tgm.convert_points_from_homogeneous(input) # BxNx2 """ if not torch.is_tensor(points): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(points))) if len(points.shape) < 2: raise ValueError('Input must be at least a 2D tensor. Got {}'. format(points.shape)) return points[..., :-1] / points[..., -1:] class ConvertPointsFromHomogeneousNew(nn.Module): """Creates a transformation that converts points from homogeneous to Euclidean space. Args: points (Tensor): tensor of N-dimensional points. Returns: Tensor: tensor of N-1-dimensional points. Shape: - Input: :math:`(B, D, N)` or :math:`(D, N)` - Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)` Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> transform = tgm.ConvertPointsFromHomogeneous() >>> output = transform(input) # BxNx2 """ def __init__(self): super(ConvertPointsFromHomogeneousNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Paultool/frankmocap
ConvertPointsFromHomogeneous
false
14,149
[ "BSD-3-Clause" ]
1,612
b8bb7b587c0841b9292edb147729de581c66054c
https://github.com/Paultool/frankmocap/tree/b8bb7b587c0841b9292edb147729de581c66054c
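A worked example of the conversion above, using the eager helper from the record: each point is divided by its last (homogeneous) coordinate, which is then dropped, so an N-dimensional input yields N-1 Euclidean coordinates. CPU is fine here.

import torch

points = torch.tensor([[2.0, 4.0, 2.0],
                       [3.0, 6.0, 3.0]])        # (N, 3) homogeneous points
print(convert_points_from_homogeneous(points))  # tensor([[1., 2.], [1., 2.]])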
ParallelPolarizedSelfAttention
import torch from torch import nn class ParallelPolarizedSelfAttention(nn.Module): def __init__(self, channel=512): super().__init__() self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1)) self.softmax_channel = nn.Softmax(1) self.softmax_spatial = nn.Softmax(-1) self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1)) self.ln = nn.LayerNorm(channel) self.sigmoid = nn.Sigmoid() self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.agp = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, x): b, c, h, w = x.size() channel_wv = self.ch_wv(x) channel_wq = self.ch_wq(x) channel_wv = channel_wv.reshape(b, c // 2, -1) channel_wq = channel_wq.reshape(b, -1, 1) channel_wq = self.softmax_channel(channel_wq) channel_wz = torch.matmul(channel_wv, channel_wq).unsqueeze(-1) channel_weight = self.sigmoid(self.ln(self.ch_wz(channel_wz). reshape(b, c, 1).permute(0, 2, 1))).permute(0, 2, 1).reshape(b, c, 1, 1) channel_out = channel_weight * x spatial_wv = self.sp_wv(x) spatial_wq = self.sp_wq(x) spatial_wq = self.agp(spatial_wq) spatial_wv = spatial_wv.reshape(b, c // 2, -1) spatial_wq = spatial_wq.permute(0, 2, 3, 1).reshape(b, 1, c // 2) spatial_wq = self.softmax_spatial(spatial_wq) spatial_wz = torch.matmul(spatial_wq, spatial_wv) spatial_weight = self.sigmoid(spatial_wz.reshape(b, 1, h, w)) spatial_out = spatial_weight * x out = spatial_out + channel_out return out def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_red_fused__softmax_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) _tmp5 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = triton_helpers.maximum(_tmp5, tmp4) _tmp5 = tl.where(rmask & xmask, tmp6, _tmp5) tmp5 = triton_helpers.max2(_tmp5, 1)[:, None] tmp8 = tl.load(in_ptr1 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) _tmp14 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp7 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp7 + tmp9 tmp11 = tmp10 - tmp5 tmp12 = tl_math.exp(tmp11) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = _tmp14 + tmp13 _tmp14 = tl.where(rmask & xmask, tmp15, _tmp14) tmp14 = tl.sum(_tmp14, 1)[:, None] tmp17 = tl.load(in_ptr1 + 0) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp16 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp19 = tmp16 + tmp18 tmp20 = tmp19 - tmp5 tmp21 = tl_math.exp(tmp20) tmp22 = tmp21 / tmp14 tl.store(out_ptr2 + (r1 + 4096 * x0), tmp22, rmask & xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, None) @triton.jit def triton_per_fused_convolution_native_layer_norm_sigmoid_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel ): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 512.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = tmp2 - tmp10 tmp22 = tmp21 * tmp20 tmp24 = tmp22 * tmp23 tmp26 = tmp24 + tmp25 tmp27 = tl.sigmoid(tmp26) tl.store(in_out_ptr0 + (r1 + 512 * x0), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp20, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp27, None) tl.store(out_ptr0 + x0, tmp10, None) @triton.jit def triton_red_fused_convolution_mean_4(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 256 x1 = xindex // 256 tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') _tmp4 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 32768 * x1), rmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = _tmp4 + tmp3 _tmp4 = tl.where(rmask, tmp5, _tmp4) tmp4 = tl.sum(_tmp4, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_per_fused_convolution_mean_5(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 256 x1 = xindex // 256 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * r2 + 8192 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_per_fused__softmax_6(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = 4096.0 tmp2 = tmp0 / tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = 
triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tmp6 = tmp2 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = tmp7 / tmp10 tl.store(out_ptr2 + (r1 + 256 * x0), tmp11, None) @triton.jit def triton_poi_fused_add_mul_sigmoid_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y3 = yindex x2 = xindex y1 = yindex // 4096 y0 = yindex % 4096 tmp0 = tl.load(in_ptr0 + y3, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2 + 512 * y3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + (x2 + 512 * y1), xmask, eviction_policy= 'evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp4 * tmp2 tmp6 = tmp3 + tmp5 tl.store(out_ptr0 + (y0 + 4096 * x2 + 2097152 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (512, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512,), (1,)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_13, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_1, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 1, 64, 1)) buf5 = empty_strided_cuda((4, 4096, 1), (4096, 1, 1), torch.float32) triton_red_fused__softmax_1[grid(4)](buf2, primals_5, buf5, 4, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_2[grid(1024, 4096)](buf1, primals_3, buf6, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf1 del primals_3 buf7 = empty_strided_cuda((4, 256, 1), (256, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 256, 4096), ( 1048576, 4096, 1), 0), buf5, out=buf7) buf8 = extern_kernels.convolution(reinterpret_tensor(buf7, (4, 256, 1, 1), (256, 1, 1, 1), 0), primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, 
bias=None) assert_size_stride(buf8, (4, 512, 1, 1), (512, 1, 1, 1)) buf9 = reinterpret_tensor(buf8, (4, 512, 1, 1), (512, 1, 512, 512), 0) del buf8 buf10 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf13 = reinterpret_tensor(buf11, (4, 1, 1), (1, 1, 1), 0) del buf11 buf14 = empty_strided_cuda((4, 1, 512), (512, 2048, 1), torch.float32) triton_per_fused_convolution_native_layer_norm_sigmoid_3[grid(4)](buf9, buf13, primals_7, primals_8, primals_9, buf10, buf14, 4, 512, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf0, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf16 = extern_kernels.convolution(buf0, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf17 = empty_strided_cuda((4, 256, 1, 1, 32), (8192, 1, 32768, 32768, 256), torch.float32) triton_red_fused_convolution_mean_4[grid(32768)](buf16, primals_13, buf17, 32768, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) del primals_13 buf18 = empty_strided_cuda((4, 256, 1, 1), (256, 1, 1024, 1024), torch.float32) triton_per_fused_convolution_mean_5[grid(1024)](buf17, buf18, 1024, 32, XBLOCK=128, num_warps=8, num_stages=1) del buf17 buf21 = empty_strided_cuda((4, 1, 256), (256, 256, 1), torch.float32) triton_per_fused__softmax_6[grid(4)](buf18, buf21, 4, 256, num_warps=2, num_stages=1) del buf18 buf22 = reinterpret_tensor(buf16, (4, 256, 64, 64), (1048576, 4096, 64, 1), 0) del buf16 triton_poi_fused_convolution_2[grid(1024, 4096)](buf15, primals_11, buf22, 1024, 4096, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf15 del primals_11 buf23 = reinterpret_tensor(buf2, (4, 1, 4096), (4096, 4096, 1), 0) del buf2 extern_kernels.bmm(buf21, reinterpret_tensor(buf22, (4, 256, 4096), (1048576, 4096, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_7[grid(16384, 512)](buf23, buf0, buf14, buf24, 16384, 512, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf14 return (buf24, buf0, primals_2, primals_4, primals_6, primals_8, primals_9, primals_10, primals_12, buf5, reinterpret_tensor(buf7, ( 4, 256, 1, 1), (256, 1, 1, 1), 0), buf9, buf10, buf13, buf21, buf23, reinterpret_tensor(buf22, (4, 4096, 256), (1048576, 1, 4096), 0), reinterpret_tensor(buf6, (4, 4096, 256), (1048576, 1, 4096), 0)) class ParallelPolarizedSelfAttentionNew(nn.Module): def __init__(self, channel=512): super().__init__() self.ch_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.ch_wq = nn.Conv2d(channel, 1, kernel_size=(1, 1)) self.softmax_channel = nn.Softmax(1) self.softmax_spatial = nn.Softmax(-1) self.ch_wz = nn.Conv2d(channel // 2, channel, kernel_size=(1, 1)) self.ln = nn.LayerNorm(channel) self.sigmoid = nn.Sigmoid() self.sp_wv = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.sp_wq = nn.Conv2d(channel, channel // 2, kernel_size=(1, 1)) self.agp = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, input_0): primals_2 = self.ch_wv.weight primals_3 = self.ch_wv.bias primals_4 = self.ch_wq.weight primals_5 = self.ch_wq.bias primals_6 = self.ch_wz.weight primals_7 = self.ch_wz.bias primals_8 = self.ln.weight primals_9 = self.ln.bias primals_10 = self.sp_wv.weight 
primals_11 = self.sp_wv.bias primals_12 = self.sp_wq.weight primals_13 = self.sp_wq.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
Nitin-Mane/External-Attention-pytorch
ParallelPolarizedSelfAttention
false
14,150
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
ConvertPointsToHomogeneous
import torch import torch.nn as nn def convert_points_to_homogeneous(points): """Function that converts points from Euclidean to homogeneous space. See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = tgm.convert_points_to_homogeneous(input) # BxNx4 """ if not torch.is_tensor(points): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(points))) if len(points.shape) < 2: raise ValueError('Input must be at least a 2D tensor. Got {}'. format(points.shape)) return nn.functional.pad(points, (0, 1), 'constant', 1.0) class ConvertPointsToHomogeneous(nn.Module): """Creates a transformation to convert points from Euclidean to homogeneous space. Args: points (Tensor): tensor of N-dimensional points. Returns: Tensor: tensor of N+1-dimensional points. Shape: - Input: :math:`(B, D, N)` or :math:`(D, N)` - Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)` Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> transform = tgm.ConvertPointsToHomogeneous() >>> output = transform(input) # BxNx4 """ def __init__(self): super(ConvertPointsToHomogeneous, self).__init__() def forward(self, input): return convert_points_to_homogeneous(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
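A tiny worked example of the conversion implemented above, assuming the function is in scope: the pad appends a constant 1.0 along the last dimension, so an (N, 3) point set becomes (N, 4).

import torch

pts = torch.tensor([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])          # (N, 3) Euclidean points
hom = convert_points_to_homogeneous(pts)        # (N, 4) homogeneous points
print(hom)
# tensor([[1., 2., 3., 1.],
#         [4., 5., 6., 1.]])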
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=1.0) tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(320)](arg0_1, buf0, 320, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def convert_points_to_homogeneous(points): """Function that converts points from Euclidean to homogeneous space. See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = tgm.convert_points_to_homogeneous(input) # BxNx4 """ if not torch.is_tensor(points): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(points))) if len(points.shape) < 2: raise ValueError('Input must be at least a 2D tensor. Got {}'. format(points.shape)) return nn.functional.pad(points, (0, 1), 'constant', 1.0) class ConvertPointsToHomogeneousNew(nn.Module): """Creates a transformation to convert points from Euclidean to homogeneous space. Args: points (Tensor): tensor of N-dimensional points. Returns: Tensor: tensor of N+1-dimensional points. Shape: - Input: :math:`(B, D, N)` or :math:`(D, N)` - Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)` Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> transform = tgm.ConvertPointsToHomogeneous() >>> output = transform(input) # BxNx4 """ def __init__(self): super(ConvertPointsToHomogeneousNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Paultool/frankmocap
ConvertPointsToHomogeneous
false
14,151
[ "BSD-3-Clause" ]
1,612
b8bb7b587c0841b9292edb147729de581c66054c
https://github.com/Paultool/frankmocap/tree/b8bb7b587c0841b9292edb147729de581c66054c
Affine
import torch from torch import nn class Affine(nn.Module): def __init__(self, channel): super().__init__() self.g = nn.Parameter(torch.ones(1, 1, channel)) self.b = nn.Parameter(torch.zeros(1, 1, channel)) def forward(self, x): return x * self.g + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
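A short, hedged numeric check for the Affine module above: it is an elementwise per-channel scale-and-shift, out = x * g + b, broadcast over the trailing channel dimension. The parameter values and shapes below are chosen only for illustration.

import torch

aff = Affine(channel=4)
with torch.no_grad():
    aff.g.fill_(2.0)       # scale every channel by 2
    aff.b.fill_(0.5)       # shift every channel by 0.5
x = torch.ones(1, 3, 4)     # (batch, tokens, channel)
print(aff(x))               # every entry equals 1 * 2.0 + 0.5 = 2.5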
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class AffineNew(nn.Module): def __init__(self, channel): super().__init__() self.g = nn.Parameter(torch.ones(1, 1, channel)) self.b = nn.Parameter(torch.zeros(1, 1, channel)) def forward(self, input_0): primals_1 = self.g primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Nitin-Mane/External-Attention-pytorch
Affine
false
14,152
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
LogitCosineDistance
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class LogitCosineDistance(torch.nn.Module): def forward(self, a, b): return torch.logit(0.5 - 0.5 * dot_product(a, b, normalize=True)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
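A hedged sketch of what LogitCosineDistance computes: with pairwise cosine similarity s in [-1, 1], the output is logit(0.5 - 0.5 * s), so orthogonal vectors map to 0, identical directions to -inf, and opposite directions to +inf. The toy vectors below are an illustrative assumption.

import torch

a = torch.tensor([[1.0, 0.0],
                  [0.0, 1.0]])
dist = LogitCosineDistance()(a, a)
print(dist)
# diagonal:      logit(0.0) = -inf  (identical directions, s = 1)
# off-diagonal:  logit(0.5) =  0.0  (orthogonal vectors,   s = 0)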
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_logit_mul_rsub_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp1 - tmp2 tmp4 = -1.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 2.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp7 / tmp9 tmp11 = tl_math.log(tmp10) tl.store(in_out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf0 del buf1 buf3 = buf2 del buf2 triton_poi_fused_logit_mul_rsub_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class LogitCosineDistanceNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ParikhKadam/flair
LogitCosineDistance
false
14,153
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
CosineDistance
import torch import torch.utils.data.dataloader import torch.nn def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class CosineDistance(torch.nn.Module): def forward(self, a, b): return -dot_product(a, b, normalize=True) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
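A quick, hedged consistency check for CosineDistance: entry (i, j) should equal the negated cosine similarity of a[i] and b[j], so it can be cross-checked against torch.nn.functional.cosine_similarity (agreement up to the slightly different eps used inside normalize).

import torch
import torch.nn.functional as F

a, b = torch.rand(3, 5), torch.rand(4, 5)
d = CosineDistance()(a, b)                                    # (3, 4) pairwise matrix
ref = -F.cosine_similarity(a.unsqueeze(1), b.unsqueeze(0), dim=-1)
print(torch.allclose(d, ref, atol=1e-6))                      # True for non-degenerate inputs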
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_neg_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = -tmp0 tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf0 del buf1 buf3 = buf2 del buf2 triton_poi_fused_neg_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, def dot_product(a: 'torch.Tensor', b: 'torch.Tensor', normalize=False): """ Computes dot product for pairs of vectors. :param normalize: Vectors are normalized (leads to cosine similarity) :return: Matrix with res[i][j] = dot_product(a[i], b[j]) """ if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) if normalize: a = torch.nn.functional.normalize(a, p=2, dim=1) b = torch.nn.functional.normalize(b, p=2, dim=1) return torch.mm(a, b.transpose(0, 1)) class CosineDistanceNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ParikhKadam/flair
CosineDistance
false
14,154
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
CRF
import torch import torch.utils.data.dataloader import torch.nn class CRF(torch.nn.Module): """ Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations. """ def __init__(self, tag_dictionary, tagset_size: 'int', init_from_state_dict: 'bool'): """ :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict """ super(CRF, self).__init__() self.tagset_size = tagset_size self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size)) if not init_from_state_dict: self.transitions.detach()[tag_dictionary.get_idx_for_item( START_TAG), :] = -10000 self.transitions.detach()[:, tag_dictionary.get_idx_for_item( STOP_TAG)] = -10000 self def forward(self, features: 'torch.Tensor') ->torch.Tensor: """ Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size) """ batch_size, seq_len = features.size()[:2] emission_scores = features emission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size) crf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze( 0) return crf_scores def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'tag_dictionary': 4, 'tagset_size': 4, 'init_from_state_dict': 4}]
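A hedged usage sketch for the CRF layer above. Note that the snippet, as extracted, relies on module-level START_TAG and STOP_TAG constants and a tag dictionary from its source repository whenever init_from_state_dict is falsy; the sketch below passes init_from_state_dict=True so that branch is skipped, which is an assumption made purely for illustration.

import torch

crf = CRF(tag_dictionary=None, tagset_size=5, init_from_state_dict=True)
emissions = torch.randn(2, 7, 5)     # (batch, seq_len, tagset_size)
scores = crf(emissions)              # (batch, seq_len, tagset_size, tagset_size)
print(scores.shape)                  # torch.Size([2, 7, 5, 5])
# scores[b, t, i, j] = emissions[b, t, i] + transitions[i, j]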
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x5, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class CRFNew(torch.nn.Module): """ Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations. """ def __init__(self, tag_dictionary, tagset_size: 'int', init_from_state_dict: 'bool'): """ :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict """ super(CRFNew, self).__init__() self.tagset_size = tagset_size self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size)) if not init_from_state_dict: self.transitions.detach()[tag_dictionary.get_idx_for_item( START_TAG), :] = -10000 self.transitions.detach()[:, tag_dictionary.get_idx_for_item( STOP_TAG)] = -10000 self def forward(self, input_0): primals_2 = self.transitions primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ParikhKadam/flair
CRF
false
14,155
[ "MIT" ]
7,539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
VectorCrossEntropy
import torch import torch.nn as nn class VectorCrossEntropy(nn.Module): def __init__(self): super().__init__() self._log_softmax = nn.LogSoftmax(dim=1) def forward(self, input, target): input = self._log_softmax(input) loss = -torch.sum(input * target) loss = loss / input.shape[0] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
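A hedged sanity check for VectorCrossEntropy: for 2D logits and one-hot targets it reduces to the usual mean cross entropy over the batch, since the sum over classes picks out the log-probability of the true class and the division by input.shape[0] averages over samples. The shapes below are illustrative assumptions.

import torch
import torch.nn.functional as F

logits = torch.randn(8, 5)
labels = torch.randint(0, 5, (8,))
one_hot = F.one_hot(labels, num_classes=5).float()

loss_vec = VectorCrossEntropy()(logits, one_hot)
loss_ref = F.cross_entropy(logits, labels)             # default reduction: mean over batch
print(torch.allclose(loss_vec, loss_ref, atol=1e-6))   # True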
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class VectorCrossEntropyNew(nn.Module): def __init__(self): super().__init__() self._log_softmax = nn.LogSoftmax(dim=1) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
PavelOstyakov/pipeline
VectorCrossEntropy
false
14,156
[ "MIT" ]
214
236c050af3be9dbb534e959589040e9433501e2b
https://github.com/PavelOstyakov/pipeline/tree/236c050af3be9dbb534e959589040e9433501e2b
PositionAttentionModule
import torch import numpy as np from torch import nn from torch.nn import init class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). :return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class PositionAttentionModule(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = ScaledDotProductAttention(d_model, d_k=d_model, d_v= d_model, h=1) def forward(self, x): bs, c, _h, _w = x.shape y = self.cnn(x) y = y.view(bs, c, -1).permute(0, 2, 1) y = self.pa(y, y, y) return y def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
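A hedged usage sketch for PositionAttentionModule: the 3x3 convolution output is flattened into h*w tokens and passed through single-head scaled dot-product attention, so the attention map is (h*w) x (h*w) and memory grows quadratically with the spatial size; a small grid is used here on purpose, and eval() disables the attention dropout for a deterministic run.

import torch

pam = PositionAttentionModule(d_model=512, kernel_size=3, H=7, W=7)
pam.eval()
x = torch.rand(2, 512, 7, 7)
with torch.no_grad():
    y = pam(x)
print(y.shape)    # torch.Size([2, 49, 512]): one 512-dim vector per spatial position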
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_red_fused__softmax_sqrt_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp9 = tl.full([XBLOCK, RBLOCK], float('-inf'), tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.full([1, 1], 22.62741699796952, tl.float64) tmp2 = tl.full([1, 1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = triton_helpers.maximum(_tmp9, tmp8) _tmp9 = tl.where(rmask, tmp10, _tmp9) tmp9 = triton_helpers.max2(_tmp9, 1)[:, None] _tmp26 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp11 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tl.full([1, 1], 22.62741699796952, tl.float64) tmp13 = tl.full([1, 1], 0.0, tl.float64) tmp14 = tmp12 >= tmp13 tmp15 = 1.0 tmp16 = -1.0 tmp17 = tl.where(tmp14, tmp15, 
tmp16) tmp18 = tmp11 * tmp17 tmp19 = tmp18 - tmp9 tmp20 = tmp17.to(tl.float64) tmp21 = tmp20 * tmp12 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = _tmp26 + tmp25 _tmp26 = tl.where(rmask, tmp27, _tmp26) tmp26 = tl.sum(_tmp26, 1)[:, None] for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp28 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp29 = tl.full([1, 1], 22.62741699796952, tl.float64) tmp30 = tl.full([1, 1], 0.0, tl.float64) tmp31 = tmp29 >= tmp30 tmp32 = 1.0 tmp33 = -1.0 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp28 * tmp34 tmp36 = tmp35 - tmp9 tmp37 = tmp34.to(tl.float64) tmp38 = tmp37 * tmp29 tmp39 = tmp38.to(tl.float32) tmp40 = tmp36 / tmp39 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp26 tl.store(out_ptr2 + (r1 + 4096 * x0), tmp42, rmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512), (512, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512), (512, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (512, 512), (512, 1)) assert_size_stride(primals_11, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_1, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_1[grid(262144, 9)](primals_2, buf1, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf3 = reinterpret_tensor(buf2, (4, 4096, 512), (2097152, 512, 1), 0) del buf2 triton_poi_fused_clone_2[grid(8388608)](buf3, primals_3, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16384, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16384, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), out =buf4) buf5 = empty_strided_cuda((16384, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16384, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 512), (1, 512), 0), out =buf5) buf6 = empty_strided_cuda((16384, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16384, 512), (512, 1), 0), reinterpret_tensor(primals_8, (512, 512), (1, 512), 0), out =buf6) buf7 = reinterpret_tensor(buf4, (4, 4096, 512), (2097152, 512, 1), 0) del buf4 triton_poi_fused_clone_2[grid(8388608)](buf7, primals_5, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf8 = reinterpret_tensor(buf5, (4, 4096, 512), (2097152, 512, 1), 0) del buf5 triton_poi_fused_clone_2[grid(8388608)](buf8, primals_7, 8388608, 
XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf9 = empty_strided_cuda((4, 4096, 4096), (16777216, 4096, 1), torch.float32) extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (4, 512, 4096), ( 2097152, 1, 512), 0), out=buf9) buf12 = empty_strided_cuda((4, 1, 4096, 4096), (16777216, 1, 4096, 1), torch.float32) triton_red_fused__softmax_sqrt_3[grid(16384)](buf9, buf12, 16384, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf9 buf13 = reinterpret_tensor(buf6, (4, 4096, 512), (2097152, 512, 1), 0) del buf6 triton_poi_fused_clone_2[grid(8388608)](buf13, primals_9, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 4096, 512), (2097152, 512, 1), torch .float32) extern_kernels.bmm(reinterpret_tensor(buf12, (4, 4096, 4096), ( 16777216, 4096, 1), 0), buf13, out=buf14) buf15 = empty_strided_cuda((16384, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf14, (16384, 512), (512, 1), 0), reinterpret_tensor(primals_10, (512, 512), (1, 512), 0), alpha=1, beta=1, out=buf15) del primals_11 return reinterpret_tensor(buf15, (4, 4096, 512), (2097152, 512, 1), 0 ), buf0, buf1, reinterpret_tensor(buf3, (16384, 512), (512, 1), 0 ), buf12, reinterpret_tensor(buf14, (16384, 512), (512, 1), 0 ), primals_10, reinterpret_tensor(buf13, (4, 512, 4096), (2097152, 1, 512), 0), reinterpret_tensor(buf7, (4, 512, 4096), (2097152, 1, 512), 0), buf8, primals_8, primals_6, primals_4 class ScaledDotProductAttention(nn.Module): """ Scaled dot-product attention """ def __init__(self, d_model, d_k, d_v, h, dropout=0.1): """ :param d_model: Output dimensionality of the model :param d_k: Dimensionality of queries and keys :param d_v: Dimensionality of values :param h: Number of heads """ super(ScaledDotProductAttention, self).__init__() self.fc_q = nn.Linear(d_model, h * d_k) self.fc_k = nn.Linear(d_model, h * d_k) self.fc_v = nn.Linear(d_model, h * d_v) self.fc_o = nn.Linear(h * d_v, d_model) self.dropout = nn.Dropout(dropout) self.d_model = d_model self.d_k = d_k self.d_v = d_v self.h = h self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.001) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, queries, keys, values, attention_mask=None, attention_weights=None): """ Computes :param queries: Queries (b_s, nq, d_model) :param keys: Keys (b_s, nk, d_model) :param values: Values (b_s, nk, d_model) :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking. :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk). 
:return: """ b_s, nq = queries.shape[:2] nk = keys.shape[1] q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3) k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1) v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3) att = torch.matmul(q, k) / np.sqrt(self.d_k) if attention_weights is not None: att = att * attention_weights if attention_mask is not None: att = att.masked_fill(attention_mask, -np.inf) att = torch.softmax(att, -1) att = self.dropout(att) out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v) out = self.fc_o(out) return out class PositionAttentionModuleNew(nn.Module): def __init__(self, d_model=512, kernel_size=3, H=7, W=7): super().__init__() self.cnn = nn.Conv2d(d_model, d_model, kernel_size=kernel_size, padding=(kernel_size - 1) // 2) self.pa = ScaledDotProductAttention(d_model, d_k=d_model, d_v= d_model, h=1) def forward(self, input_0): primals_2 = self.cnn.weight primals_3 = self.cnn.bias primals_4 = self.pa.fc_q.weight primals_5 = self.pa.fc_q.bias primals_6 = self.pa.fc_k.weight primals_7 = self.pa.fc_k.bias primals_8 = self.pa.fc_v.weight primals_9 = self.pa.fc_v.bias primals_10 = self.pa.fc_o.weight primals_11 = self.pa.fc_o.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Nitin-Mane/External-Attention-pytorch
PositionAttentionModule
false
14,157
[ "MIT" ]
4,466
1ceda306c41063af11c956334747763444a4d83f
https://github.com/Nitin-Mane/External-Attention-pytorch/tree/1ceda306c41063af11c956334747763444a4d83f
IrisClassifier
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class IrisClassifier(nn.Module): def __init__(self): super(IrisClassifier, self).__init__() self.fc1 = nn.Linear(4, 10) self.fc2 = nn.Linear(10, 10) self.fc3 = nn.Linear(10, 3) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.dropout(x, 0.2) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
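A minimal, hedged training-step sketch for the 4-10-10-3 classifier above, using random stand-in data rather than the actual Iris dataset; the optimizer choice and learning rate are illustrative assumptions.

import torch
import torch.nn.functional as F

model = IrisClassifier()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

features = torch.rand(16, 4)             # 16 samples, 4 features each
labels = torch.randint(0, 3, (16,))      # 3 classes

logits = model(features)                 # (16, 3)
loss = F.cross_entropy(logits, labels)
loss.backward()
optimizer.step()
optimizer.zero_grad()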
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 10), (10, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (3, 10), (10, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf9, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 10), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf3, primals_5, buf8, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = torch.ops.aten.native_dropout.default(buf3, 0.2, True) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 3), (1, 10), 0), alpha=1, beta=1, out=buf7) del primals_7 return reinterpret_tensor(buf7, (4, 4, 4, 3), (48, 12, 3, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0 ), buf6, reinterpret_tensor(buf5, (64, 10), (10, 1), 0 ), primals_6, buf8, primals_4, buf9 class IrisClassifierNew(nn.Module): def __init__(self): super(IrisClassifierNew, self).__init__() self.fc1 = nn.Linear(4, 10) self.fc2 = nn.Linear(10, 10) self.fc3 = nn.Linear(10, 3) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias 
primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
PeterSulcs/mlflow
IrisClassifier
false
14,158
[ "Apache-2.0" ]
10,351
14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051
https://github.com/PeterSulcs/mlflow/tree/14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051
SoftmaxCELoss
from torch.nn import Module import torch from torch.nn import functional as F from torch import nn class SoftmaxCELoss(Module): def __init__(self, num_classes, num_features, dropout=0.5): super(SoftmaxCELoss, self).__init__() self.num_classes = num_classes self.num_features = num_features self.dropout = dropout self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False) if self.dropout > 0: self.drop = nn.Dropout(self.dropout) self.reset_parameters() def reset_parameters(self): self.classifier.reset_parameters() def _check_input_dim(self, input): if input.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format( input.dim())) def forward(self, feature, target): self._check_input_dim(feature) x = feature if self.dropout > 0: x = self.drop(x) logits = self.classifier(x) loss = F.cross_entropy(logits, target, reduction='none') with torch.no_grad(): _, preds = torch.max(logits, 1) acc = (preds == target).float() return loss, acc def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_classes': 4, 'num_features': 4}]
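A hedged usage sketch for SoftmaxCELoss with integer class labels: it returns a per-sample cross-entropy loss and a per-sample 0/1 accuracy. (The bundled get_inputs() instead passes float targets, which recent PyTorch versions treat as class probabilities inside F.cross_entropy; the integer-label case shown here is the more common one.) eval() is called only to disable the feature dropout for a deterministic check.

import torch

criterion = SoftmaxCELoss(num_classes=4, num_features=8, dropout=0.5)
criterion.eval()

features = torch.randn(6, 8)             # (batch, num_features), must be 2D
targets = torch.randint(0, 4, (6,))      # class indices

loss, acc = criterion(features, targets)
print(loss.shape, acc.shape)             # torch.Size([6]) torch.Size([6])
print(loss.mean().item(), acc.mean().item())   # scalar loss / batch accuracy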
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tl.store(out_ptr0 + x0, tmp27, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, 
tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused__to_copy_eq_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.float32) tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__log_softmax_mul_neg_sum_1[grid(4)](buf1, primals_3, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_max_2[grid(4)](buf0, buf3, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused__to_copy_eq_3[grid(16)](buf3, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 return buf2, buf4, primals_1, primals_3, buf0 class SoftmaxCELossNew(Module): def __init__(self, num_classes, num_features, dropout=0.5): super(SoftmaxCELossNew, self).__init__() self.num_classes = num_classes self.num_features = num_features self.dropout = dropout self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False) if self.dropout > 0: self.drop = nn.Dropout(self.dropout) self.reset_parameters() def reset_parameters(self): self.classifier.reset_parameters() def _check_input_dim(self, input): if input.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format( input.dim())) def forward(self, input_0, input_1): primals_1 = self.classifier.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
Pandinosaurus/RandPerson
SoftmaxCELoss
false
14159
[ "Apache-2.0" ]
83
7dd503cc1d063d95b8cf6b43d40bb93452192d6d
https://github.com/Pandinosaurus/RandPerson/tree/7dd503cc1d063d95b8cf6b43d40bb93452192d6d
CoordFC
import torch import numpy as np from torch import nn class SinActivation(nn.Module): def __init__(self): super(SinActivation, self).__init__() def forward(self, x): return torch.sin(x) class CoordFC(nn.Module): def __init__(self, input_dim, hidden_dim): super().__init__() self.layer = nn.Linear(input_dim, hidden_dim) nn.init.uniform_(self.layer.weight, -np.sqrt(9 / input_dim), np. sqrt(9 / input_dim)) self.act = SinActivation() pass def forward(self, x): x = self.layer(x) out = self.act(x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.sin(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class SinActivation(nn.Module): def __init__(self): super(SinActivation, self).__init__() def forward(self, x): return torch.sin(x) class CoordFCNew(nn.Module): def __init__(self, input_dim, hidden_dim): super().__init__() self.layer = nn.Linear(input_dim, hidden_dim) nn.init.uniform_(self.layer.weight, -np.sqrt(9 / input_dim), np. sqrt(9 / input_dim)) self.act = SinActivation() pass def forward(self, input_0): primals_1 = self.layer.weight primals_2 = self.layer.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
PeterouZh/CIPS-3D
CoordFC
false
14160
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
LinearScale
import torch from torch import nn class LinearScale(nn.Module): def __init__(self, scale, bias): super(LinearScale, self).__init__() self.scale_v = scale self.bias_v = bias pass def forward(self, x): out = x * self.scale_v + self.bias_v return out def __repr__(self): repr = ( f'{self.__class__.__name__}(scale_v={self.scale_v},bias_v={self.bias_v})' ) return repr def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scale': 1.0, 'bias': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = 4.0 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class LinearScaleNew(nn.Module): def __init__(self, scale, bias): super(LinearScaleNew, self).__init__() self.scale_v = scale self.bias_v = bias pass def __repr__(self): repr = ( f'{self.__class__.__name__}(scale_v={self.scale_v},bias_v={self.bias_v})' ) return repr def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PeterouZh/CIPS-3D
LinearScale
false
14161
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
CoordConvSinAct
import torch from torch import nn class SinAct(nn.Module): def __init__(self): super(SinAct, self).__init__() def forward(self, x): return torch.sin(x) class CoordConvSinAct(nn.Module): """ Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py """ def __init__(self, in_channels, out_channels, channels_per_group=16, ** kwargs): super().__init__() self.coord_conv = nn.Conv2d(2, out_channels, **kwargs) self.sin_act = SinAct() self.conv = nn.Conv2d(in_channels, out_channels, **kwargs) pass def forward(self, input): batch, _, H, W = input.shape x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=input.device), torch.linspace(-1, 1, H, device=input.device)) x = x.T y = y.T xy = torch.stack((x, y), dim=0) xy = xy.expand((batch, -1, -1, -1)) xy_fea = self.coord_conv(xy) xy_fea = self.sin_act(xy_fea) out = self.conv(input) out = xy_fea + out return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_stack_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = 2.0 tmp8 = tmp6 < tmp7 tmp9 = 0.6666666666666666 tmp10 = tmp6 * tmp9 tmp11 = -1.0 tmp12 = tmp10 + tmp11 tmp13 = 3 + -1 * x0 tmp14 = tmp13.to(tl.float32) tmp15 = tmp14 * tmp9 tmp16 = 1.0 tmp17 = tmp16 - tmp15 tmp18 = tl.where(tmp8, tmp12, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp24 = -4 + x1 tmp25 = tmp24.to(tl.float32) tmp26 = tmp25 < tmp7 tmp27 = tmp25 * tmp9 tmp28 = tmp27 + tmp11 tmp29 = 3 + -1 * (-4 + x1) tmp30 = tmp29.to(tl.float32) tmp31 = tmp30 * tmp9 tmp32 = tmp16 - tmp31 tmp33 = tl.where(tmp26, tmp28, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp21, tmp33, tmp34) tmp36 = tl.where(tmp4, tmp20, tmp35) tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_convolution_sin_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_out_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl_math.sin(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tmp3 + tmp6 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(in_out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(32)](buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused_convolution_1[grid(128)](buf0, buf1, 128, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 
1, 1)) del buf1 buf4 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 buf5 = buf4 del buf4 triton_poi_fused_add_convolution_sin_2[grid(16)](buf3, buf5, primals_3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 del primals_5 return buf5, primals_1, primals_2, primals_4, buf0, buf3 class SinAct(nn.Module): def __init__(self): super(SinAct, self).__init__() def forward(self, x): return torch.sin(x) class CoordConvSinActNew(nn.Module): """ Source: https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py """ def __init__(self, in_channels, out_channels, channels_per_group=16, ** kwargs): super().__init__() self.coord_conv = nn.Conv2d(2, out_channels, **kwargs) self.sin_act = SinAct() self.conv = nn.Conv2d(in_channels, out_channels, **kwargs) pass def forward(self, input_0): primals_2 = self.coord_conv.weight primals_3 = self.coord_conv.bias primals_1 = self.conv.weight primals_5 = self.conv.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
PeterouZh/CIPS-3D
CoordConvSinAct
false
14162
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
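A minimal standalone sketch of the coordinate grid that CoordConvSinAct.forward builds before passing it through coord_conv; the batch and spatial sizes below are arbitrary illustration values, not anything taken from this entry.

import torch

# Reproduce the grid construction from CoordConvSinAct.forward in isolation.
batch, H, W = 2, 5, 7
x, y = torch.meshgrid(torch.linspace(-1, 1, W), torch.linspace(-1, 1, H))
x, y = x.T, y.T                      # each (H, W); x varies along width, y along height
xy = torch.stack((x, y), dim=0)      # (2, H, W): two coordinate channels
xy = xy.expand(batch, -1, -1, -1)    # (batch, 2, H, W), the same grid shared across the batch
assert xy.shape == (batch, 2, H, W)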
GlobalAveragePooling
import torch from torch import nn class GlobalAveragePooling(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.mean([2, 3]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class GlobalAveragePoolingNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PeterouZh/CIPS-3D
GlobalAveragePooling
false
14163
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
EuclideanDistance
import torch from torch import Tensor import torch.utils.data.dataloader from torch import nn import torch.nn def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class EuclideanDistance(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, mat_1: 'Tensor', mat_2: 'Tensor') ->Tensor: """Returns the squared euclidean distance between each element in mat_1 and each element in mat_2. Parameters ---------- mat_1: torch.Tensor matrix of shape (n_1, n_features) mat_2: torch.Tensor matrix of shape (n_2, n_features) Returns ------- dist: torch.Tensor distance matrix of shape (n_1, n_2) """ _dist = [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range( mat_2.size(0))] dist = torch.stack(_dist, dim=1) return dist def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data.dataloader from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp10 = tl.load(in_ptr1 + (16 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp14 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp15 = tl.load(in_ptr1 + (32 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = tl.load(in_ptr0 + (48 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (48 + x0 + 4 * x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp19 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tmp18 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp4, tmp23, tmp24) tmp26 = tmp0 >= tmp3 tmp27 = tl.full([1], 8, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (64 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp30 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp35 = tl.load(in_ptr1 + (80 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp39 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp40 = tl.load(in_ptr1 + (96 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tmp39 - tmp40 tmp42 = tmp41 * tmp41 tmp43 = tmp38 + tmp42 tmp44 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp29 & xmask, other=0.0) tmp45 = tl.load(in_ptr1 + (112 + x0 + 4 * (-4 + x1)), tmp29 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp44 - tmp45 tmp47 = tmp46 * tmp46 tmp48 = tmp43 + tmp47 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp29, tmp48, tmp49) tmp51 = tmp0 >= tmp27 tmp52 = tl.full([1], 12, tl.int64) tmp53 = tmp0 < tmp52 tmp54 = tmp51 & tmp53 tmp55 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (128 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp55 - tmp56 tmp58 = tmp57 * tmp57 tmp59 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp60 = tl.load(in_ptr1 + (144 + x0 + 4 * (-8 + x1)), tmp54 & 
xmask, eviction_policy='evict_last', other=0.0) tmp61 = tmp59 - tmp60 tmp62 = tmp61 * tmp61 tmp63 = tmp58 + tmp62 tmp64 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp65 = tl.load(in_ptr1 + (160 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp66 = tmp64 - tmp65 tmp67 = tmp66 * tmp66 tmp68 = tmp63 + tmp67 tmp69 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp54 & xmask, other=0.0) tmp70 = tl.load(in_ptr1 + (176 + x0 + 4 * (-8 + x1)), tmp54 & xmask, eviction_policy='evict_last', other=0.0) tmp71 = tmp69 - tmp70 tmp72 = tmp71 * tmp71 tmp73 = tmp68 + tmp72 tmp74 = tl.full(tmp73.shape, 0.0, tmp73.dtype) tmp75 = tl.where(tmp54, tmp73, tmp74) tmp76 = tmp0 >= tmp52 tl.full([1], 16, tl.int64) tmp79 = tl.load(in_ptr0 + (x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp80 = tl.load(in_ptr1 + (192 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp81 = tmp79 - tmp80 tmp82 = tmp81 * tmp81 tmp83 = tl.load(in_ptr0 + (16 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp84 = tl.load(in_ptr1 + (208 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tmp83 - tmp84 tmp86 = tmp85 * tmp85 tmp87 = tmp82 + tmp86 tmp88 = tl.load(in_ptr0 + (32 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp89 = tl.load(in_ptr1 + (224 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp90 = tmp88 - tmp89 tmp91 = tmp90 * tmp90 tmp92 = tmp87 + tmp91 tmp93 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1) + 64 * x2), tmp76 & xmask, other=0.0) tmp94 = tl.load(in_ptr1 + (240 + x0 + 4 * (-12 + x1)), tmp76 & xmask, eviction_policy='evict_last', other=0.0) tmp95 = tmp93 - tmp94 tmp96 = tmp95 * tmp95 tmp97 = tmp92 + tmp96 tmp98 = tl.full(tmp97.shape, 0.0, tmp97.dtype) tmp99 = tl.where(tmp76, tmp97, tmp98) tmp100 = tl.where(tmp54, tmp75, tmp99) tmp101 = tl.where(tmp29, tmp50, tmp100) tmp102 = tl.where(tmp4, tmp25, tmp101) tl.store(out_ptr0 + x3, tmp102, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), def arccosh(x): """Compute the arcosh, numerically stable.""" x = torch.clamp(x, min=1 + EPSILON) a = torch.log(x) b = torch.log1p(torch.sqrt(x * x - 1) / x) return a + b def mdot(x, y): """Compute the inner product.""" m = x.new_ones(1, x.size(1)) m[0, 0] = -1 return torch.sum(m * x * y, 1, keepdim=True) def dist(x, y): """Get the hyperbolic distance between x and y.""" return arccosh(-mdot(x, y)) class EuclideanDistanceNew(nn.Module): """Implement a EuclideanDistance object.""" def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ParikhKadam/flair
EuclideanDistance
false
14164
[ "MIT" ]
7539
a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
https://github.com/ParikhKadam/flair/tree/a1732bc5ab0b4aeb09d1ed3a630ae2fd8fa095ef
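For 2D inputs of shape (n_1, n_features) and (n_2, n_features) — the case the docstring above describes — the per-column loop in EuclideanDistance.forward matches a single broadcasted expression; the shapes below are arbitrary illustration values.

import torch

mat_1 = torch.rand(3, 5)
mat_2 = torch.rand(4, 5)

# Loop form, as in EuclideanDistance.forward: one column of distances per row of mat_2.
loop = torch.stack(
    [torch.sum((mat_1 - mat_2[i]) ** 2, dim=1) for i in range(mat_2.size(0))],
    dim=1,
)                                                        # (3, 4)

# Broadcasted form: all pairwise squared distances in one expression.
vectorized = ((mat_1[:, None, :] - mat_2[None, :, :]) ** 2).sum(-1)
assert torch.allclose(loop, vectorized)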
MultiHeadAttn
import torch from torch import nn import torch.nn.functional as F class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm def forward(self, h, attn_mask=None, mems=None): if mems is not None: c = torch.cat([mems, h], 0) else: c = h if self.pre_lnorm: c = self.layer_norm(c) head_q = self.q_net(h) head_k, head_v = torch.chunk(self.kv_net(c), 2, -1) head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head) head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head) head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head) attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k)) attn_score.mul_(self.scale) if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, :, :, None], -float ('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf') ) attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v)) attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec. size(1), self.n_head * self.d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: output = h + attn_out else: output = self.layer_norm(h + attn_out) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'd_model': 4, 'd_head': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 32 * y1 + 128 * x2), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x2), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x2), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x2), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x2), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (16 + x0 + 4 * x2 + 32 * x3 + 128 * x1), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (32, 4), (4, 1)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), 
(16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (4, 64, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (4, 1, 64, 16), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(16, 16)](buf4, buf5, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf4 triton_poi_fused_clone_3[grid(256)](buf1, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (1, 64, 16), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf9, buf10, buf11, primals_5, primals_6, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_6 return buf12, primals_1, primals_5, buf5, reinterpret_tensor(buf8, (16, 16), (16, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (4, 1, 64), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0) class MultiHeadAttnNew(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False): super(MultiHeadAttnNew, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm def forward(self, input_0): primals_2 = self.q_net.weight primals_3 = self.kv_net.weight primals_4 = self.o_net.weight primals_5 = self.layer_norm.weight primals_6 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
PeganovAnton/transformer-xl
MultiHeadAttn
false
14165
[ "Apache-2.0" ]
133
f36428445cc903872fde54d90bc5e61886420a5a
https://github.com/PeganovAnton/transformer-xl/tree/f36428445cc903872fde54d90bc5e61886420a5a
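The two einsums in MultiHeadAttn.forward can be sanity-checked in isolation using the module's own index names (i, j = positions, b = batch, n = heads, d = head dim); all sizes below are arbitrary illustration values.

import torch

i = j = 3
b, n, d = 2, 4, 8
head_q = torch.rand(i, b, n, d)
head_k = torch.rand(j, b, n, d)
head_v = torch.rand(j, b, n, d)

attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k)) * d ** -0.5  # scale = 1/sqrt(d_head)
attn_prob = attn_score.softmax(dim=1)                      # normalise over the key positions j
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
assert attn_vec.shape == (i, b, n, d)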
UniformBoxWarp
import torch from torch import nn class UniformBoxWarp(nn.Module): def __init__(self, sidelength): super().__init__() self.scale_factor = 2 / sidelength def forward(self, coordinates): return coordinates * self.scale_factor def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sidelength': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class UniformBoxWarpNew(nn.Module): def __init__(self, sidelength): super().__init__() self.scale_factor = 2 / sidelength def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PeterouZh/CIPS-3D
UniformBoxWarp
false
14166
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
WideResNet
import math import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): m = 2 def __init__(self, in_planes, out_planes, stride, dropout, fixup_l, fixup_coeff): super(BasicBlock, self).__init__() self._dropout = dropout self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride =stride, padding=1, bias=False) self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False) self.equalInOut = in_planes == out_planes self.conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False) self.conv_res = not self.equalInOut and self.conv_res or None self.scale = nn.Parameter(torch.ones(1)) self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)]) k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1 ] * self.conv1.out_channels self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2.0 / k)) self.conv2.weight.data.zero_() if self.conv_res is not None: k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1 ] * self.conv_res.out_channels self.conv_res.weight.data.normal_(0, math.sqrt(2.0 / k)) def forward(self, x): x_out = self.relu(x + self.biases[0]) out = self.conv1(x_out) + self.biases[1] out = self.relu(out) + self.biases[2] if self._dropout > 0: out = F.dropout(out, p=self._dropout, training=self.training) out = self.scale * self.conv2(out) + self.biases[3] if self.equalInOut: return torch.add(x, out) return torch.add(self.conv_res(x_out), out) class NetworkBlock(nn.Module): def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropout, fixup_l, fixup_coeff): super(NetworkBlock, self).__init__() self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff) def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff): layers = [] for i in range(int(nb_layers)): _in_planes = i == 0 and in_planes or out_planes _stride = i == 0 and stride or 1 layers.append(block(_in_planes, out_planes, _stride, dropout= dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)) return nn.Sequential(*layers) def forward(self, x): return self.layer(x) class WideResNet(nn.Module): def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0, fixup_coeff=1): super(WideResNet, self).__init__() nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor] assert (depth - 4) % 6 == 0, 'You need to change the number of layers' n = (depth - 4) / 6 block = BasicBlock fixup_l = n * 3 self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.relu = nn.ReLU(inplace=True) self.fc = nn.Linear(nChannels[3], num_classes) self.nChannels = nChannels[3] self.fc.bias.data.zero_() self.fc.weight.data.zero_() k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1 ] * self.conv1.out_channels self.conv1.weight.data.normal_(0, math.sqrt(2.0 / k)) self.bias1 = nn.Parameter(torch.zeros(1)) self.bias2 = nn.Parameter(torch.zeros(1)) def forward(self, x): out = self.conv1(x) + self.bias1 out = self.block1(out) 
out = self.block2(out) out = self.block3(out) out = self.relu(out) out = F.adaptive_avg_pool2d(out, 1) out = out.view(-1, self.nChannels) return self.fc(out + self.bias2) def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'depth': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_add_mean_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr, RBLOCK: tl.constexpr): xnumel = 64 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp9 = 0.0 tmp10 = tmp5 <= tmp9 tl.store(out_ptr0 + (r1 + 4096 * x0), tmp10, rmask & xmask) tmp7 = tl.sum(_tmp7, 1)[:, None] tmp13 = tl.load(in_ptr2 + 0) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp11 = 4096.0 tmp12 = tmp7 / tmp11 tmp15 = tmp12 + tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 64), (64, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) buf4 = empty_strided_cuda((4, 16, 64, 64), (65536, 4096, 64, 1), torch.bool) buf2 = reinterpret_tensor(buf1, (1, 64), (64, 1), 0) del buf1 get_raw_stream(0) triton_red_fused_add_mean_relu_threshold_backward_0[grid(64)](buf2, buf0, primals_3, primals_4, buf4, 64, 4096, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) del buf0 del primals_3 del primals_4 buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf3) del primals_6 return buf3, primals_1, primals_2, buf2, primals_5, buf4 class BasicBlock(nn.Module): m = 2 def __init__(self, in_planes, out_planes, stride, dropout, fixup_l, fixup_coeff): super(BasicBlock, self).__init__() self._dropout = dropout self.relu = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride =stride, padding=1, bias=False) self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False) 
self.equalInOut = in_planes == out_planes self.conv_res = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False) self.conv_res = not self.equalInOut and self.conv_res or None self.scale = nn.Parameter(torch.ones(1)) self.biases = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(4)]) k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1 ] * self.conv1.out_channels self.conv1.weight.data.normal_(0, fixup_coeff * fixup_l ** (-1 / (2 * self.m - 2)) * math.sqrt(2.0 / k)) self.conv2.weight.data.zero_() if self.conv_res is not None: k = self.conv_res.kernel_size[0] * self.conv_res.kernel_size[1 ] * self.conv_res.out_channels self.conv_res.weight.data.normal_(0, math.sqrt(2.0 / k)) def forward(self, x): x_out = self.relu(x + self.biases[0]) out = self.conv1(x_out) + self.biases[1] out = self.relu(out) + self.biases[2] if self._dropout > 0: out = F.dropout(out, p=self._dropout, training=self.training) out = self.scale * self.conv2(out) + self.biases[3] if self.equalInOut: return torch.add(x, out) return torch.add(self.conv_res(x_out), out) class NetworkBlock(nn.Module): def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropout, fixup_l, fixup_coeff): super(NetworkBlock, self).__init__() self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff) def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropout, fixup_l, fixup_coeff): layers = [] for i in range(int(nb_layers)): _in_planes = i == 0 and in_planes or out_planes _stride = i == 0 and stride or 1 layers.append(block(_in_planes, out_planes, _stride, dropout= dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff)) return nn.Sequential(*layers) def forward(self, x): return self.layer(x) class WideResNetNew(nn.Module): def __init__(self, depth, num_classes, widen_factor=1, dropout=0.0, fixup_coeff=1): super(WideResNetNew, self).__init__() nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor] assert (depth - 4) % 6 == 0, 'You need to change the number of layers' n = (depth - 4) / 6 block = BasicBlock fixup_l = n * 3 self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropout=dropout, fixup_l=fixup_l, fixup_coeff=fixup_coeff) self.relu = nn.ReLU(inplace=True) self.fc = nn.Linear(nChannels[3], num_classes) self.nChannels = nChannels[3] self.fc.bias.data.zero_() self.fc.weight.data.zero_() k = self.conv1.kernel_size[0] * self.conv1.kernel_size[1 ] * self.conv1.out_channels self.conv1.weight.data.normal_(0, math.sqrt(2.0 / k)) self.bias1 = nn.Parameter(torch.zeros(1)) self.bias2 = nn.Parameter(torch.zeros(1)) def forward(self, input_0): primals_3 = self.bias1 primals_4 = self.bias2 primals_1 = self.conv1.weight primals_5 = self.fc.weight primals_6 = self.fc.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
PavelOstyakov/pipeline
WideResNet
false
14167
[ "MIT" ]
214
236c050af3be9dbb534e959589040e9433501e2b
https://github.com/PavelOstyakov/pipeline/tree/236c050af3be9dbb534e959589040e9433501e2b
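With m = 2, the Fixup-style standard deviation applied to BasicBlock.conv1 above reduces to fixup_coeff * fixup_l ** -0.5 * sqrt(2 / k), where k = kernel_size**2 * out_channels. A small sketch; the example values (3x3 kernel, 16 channels, fixup_l = 12) are illustrative and correspond to a deeper network than the depth-4 test configuration in this entry.

import math

def fixup_std(kernel_size, out_channels, fixup_l, fixup_coeff=1.0, m=2):
    # k as computed in BasicBlock.__init__ for conv1
    k = kernel_size * kernel_size * out_channels
    return fixup_coeff * fixup_l ** (-1 / (2 * m - 2)) * math.sqrt(2.0 / k)

print(fixup_std(kernel_size=3, out_channels=16, fixup_l=12))  # ≈ 0.034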
SinActivation
import torch from torch import nn class SinActivation(nn.Module): def __init__(self): super(SinActivation, self).__init__() def forward(self, x): return torch.sin(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.sin(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SinActivationNew(nn.Module): def __init__(self): super(SinActivationNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
PeterouZh/CIPS-3D
SinActivation
false
14168
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
EqualConvTranspose2d
import math import torch import torch.nn.functional as F from torch import nn class EqualConvTranspose2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(in_channel, out_channel, kernel_size, kernel_size)) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.stride = stride self.padding = padding if bias: self.bias = nn.Parameter(torch.zeros(out_channel)) else: self.bias = None def forward(self, input): out = F.conv_transpose2d(input, self.weight * self.scale, bias=self .bias, stride=self.stride, padding=self.padding) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[0]}, {self.weight.shape[1]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 49 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(784)](buf2, primals_2, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_3, buf0 class EqualConvTranspose2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(in_channel, out_channel, kernel_size, kernel_size)) self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) self.stride = stride self.padding = padding if bias: self.bias = nn.Parameter(torch.zeros(out_channel)) else: self.bias = None def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[0]}, {self.weight.shape[1]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
PeterouZh/CIPS-3D
EqualConvTranspose2d
false
14169
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
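The constant 0.125 hard-coded in triton_poi_fused_mul_0 above is just the equalized-learning-rate scale 1 / sqrt(in_channel * kernel_size**2) evaluated for this entry's test configuration (in_channel=4, kernel_size=4); a quick check:

import math

in_channel, kernel_size = 4, 4
scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
assert scale == 0.125  # the value baked into the fused multiply kernel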
CLNLayer
import torch import torch.nn.functional as F from torch import nn class CLN(nn.Module): def __init__(self, in_dim, use_style_fc=False, style_dim=None, which_linear=nn.Linear, spectral_norm=False, eps=1e-05, **kwargs): super(CLN, self).__init__() self.in_dim = in_dim self.use_style_fc = use_style_fc self.style_dim = style_dim self.spectral_norm = spectral_norm if use_style_fc: self.gain = which_linear(style_dim, in_dim) self.bias = which_linear(style_dim, in_dim) if spectral_norm: self.gain = nn.utils.spectral_norm(self.gain) self.bias = nn.utils.spectral_norm(self.bias) else: self.style_dim = in_dim * 2 self.eps = eps pass def forward(self, x, style): """ Calculate class-conditional gains and biases. :param x: (b, c) or (b, n, c) :param style: (b, c) :return: """ if self.use_style_fc: gain = self.gain(style) + 1.0 bias = self.bias(style) else: style = rearrange(style, 'b (n c) -> b n c', n=2) gain, bias = style.unbind(dim=1) gain = gain + 1.0 if x.dim() == 3: gain = rearrange(gain, 'b c -> b 1 c') bias = rearrange(bias, 'b c -> b 1 c') elif x.dim() == 2: pass else: assert 0 out = F.layer_norm(x, normalized_shape=(self.in_dim,), weight=None, bias=None, eps=self.eps) out = out * gain + bias return out def __repr__(self): s = ( f'{self.__class__.__name__}(in_dim={self.in_dim}, style_dim={self.style_dim})' ) return s class CLNLayer(nn.Module): def __repr__(self): return f'{self.__class__.__name__}({self.repr})' def __init__(self, in_dim, out_dim, style_dim): super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.style_dim = style_dim self.repr = ( f'in_dim={in_dim}, out_dim={out_dim}, style_dim={style_dim}') self.linear1 = nn.Linear(in_dim, out_dim) self.cln1 = CLN(in_dim=out_dim, use_style_fc=True, style_dim=style_dim) self.style_dim = self.cln1.style_dim self.act1 = nn.LeakyReLU(0.2, inplace=True) pass def forward(self, x, style): x = self.linear1(x) x = self.cln1(x, style) x = self.act1(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'style_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_native_layer_norm_1(
    in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x2, xmask)
    tmp9 = tl.load(in_out_ptr0 + x2, xmask)
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp4 * tmp7
    tmp11 = tmp9 + tmp10
    tmp12 = tmp8 + tmp11
    tmp13 = 0.0
    tmp14 = tmp12 > tmp13
    tmp15 = 0.2
    tmp16 = tmp12 * tmp15
    tmp17 = tl.where(tmp14, tmp12, tmp16)
    tmp18 = tmp17 > tmp13
    tl.store(in_out_ptr0 + x2, tmp17, xmask)
    tl.store(out_ptr0 + x2, tmp18, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
            primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
            primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_6, reinterpret_tensor(primals_7, (4, 4),
            (1, 4), 0), out=buf2)
        del primals_7
        buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(4)](buf0, buf3, buf4, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        buf5 = buf2
        del buf2
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_native_layer_norm_1[
            grid(16)](buf6, buf0, buf3, buf4, buf1, primals_8, buf7, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf3
        del buf4
        del primals_8
    return buf6, primals_3, primals_6, buf0, buf1, buf7


class CLN(nn.Module):

    def __init__(self, in_dim, use_style_fc=False, style_dim=None,
        which_linear=nn.Linear, spectral_norm=False, eps=1e-05, **kwargs):
        super(CLN, self).__init__()
        self.in_dim = in_dim
        self.use_style_fc = use_style_fc
        self.style_dim = style_dim
        self.spectral_norm = spectral_norm
        if use_style_fc:
            self.gain = which_linear(style_dim, in_dim)
            self.bias = which_linear(style_dim, in_dim)
            if spectral_norm:
                self.gain = nn.utils.spectral_norm(self.gain)
                self.bias = nn.utils.spectral_norm(self.bias)
        else:
            self.style_dim = in_dim * 2
        self.eps = eps
        pass

    def forward(self, x, style):
        """
        Calculate class-conditional gains and biases.

        :param x: (b, c) or (b, n, c)
        :param style: (b, c)
        :return:
        """
        if self.use_style_fc:
            gain = self.gain(style) + 1.0
            bias = self.bias(style)
        else:
            style = rearrange(style, 'b (n c) -> b n c', n=2)
            gain, bias = style.unbind(dim=1)
            gain = gain + 1.0
        if x.dim() == 3:
            gain = rearrange(gain, 'b c -> b 1 c')
            bias = rearrange(bias, 'b c -> b 1 c')
        elif x.dim() == 2:
            pass
        else:
            assert 0
        out = F.layer_norm(x, normalized_shape=(self.in_dim,), weight=None,
            bias=None, eps=self.eps)
        out = out * gain + bias
        return out

    def __repr__(self):
        s = (
            f'{self.__class__.__name__}(in_dim={self.in_dim}, style_dim={self.style_dim})'
            )
        return s


class CLNLayerNew(nn.Module):

    def __repr__(self):
        return f'{self.__class__.__name__}({self.repr})'

    def __init__(self, in_dim, out_dim, style_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.style_dim = style_dim
        self.repr = (
            f'in_dim={in_dim}, out_dim={out_dim}, style_dim={style_dim}')
        self.linear1 = nn.Linear(in_dim, out_dim)
        self.cln1 = CLN(in_dim=out_dim, use_style_fc=True, style_dim=style_dim)
        self.style_dim = self.cln1.style_dim
        self.act1 = nn.LeakyReLU(0.2, inplace=True)
        pass

    def forward(self, input_0, input_1):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_3 = self.cln1.gain.weight
        primals_5 = self.cln1.gain.bias
        primals_4 = self.cln1.bias.weight
        primals_8 = self.cln1.bias.bias
        primals_6 = input_0
        primals_7 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
PeterouZh/CIPS-3D
CLNLayer
false
14,170
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
Smoother
from torch.nn import Module
import torch
from torch import Tensor
from typing import Optional
import torch.nn.functional as F
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention


class Smoother(Module):
    """Convolutional Transformer Encoder Layer"""

    def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=0.1):
        super(Smoother, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
        self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)

    def forward(self, src: 'Tensor', src_mask: 'Optional[Tensor]'=None,
        src_key_padding_mask: 'Optional[Tensor]'=None) ->Tensor:
        src2 = self.self_attn(src, src, src, attn_mask=src_mask,
            key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = src.transpose(0, 1).transpose(1, 2)
        src2 = self.conv2(F.relu(self.conv1(src2)))
        src2 = src2.transpose(1, 2).transpose(0, 1)
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4, 'd_hid': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Module
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv1d
from torch.nn import MultiheadAttention
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x3 = xindex
    y0 = yindex
    x1 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x3 + 16 * y0), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0 + 4 * x3), xmask & ymask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(out_ptr0 + (x3 + 16 * y0), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (12, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 9), (36, 9, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(192)](buf0, primals_2, buf1, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
        triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (16, 1, 4), (1,
            0, 16), 64), out=buf3)
        buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf5 = buf3
        del buf3
        triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf4
        buf6 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf5, reinterpret_tensor(buf1, (16, 4, 1), (1,
            16, 0), 128), out=buf6)
        buf7 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        triton_poi_fused_clone_4[grid(4, 16)](buf6, buf7, 4, 16, XBLOCK=16,
            YBLOCK=4, num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0)
        del buf6
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_5
        buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf8,
            buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf8,
            buf9, buf10, primals_6, primals_7, buf11, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_convolution_7[grid(16, 4)](buf11, buf12, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1,),
            padding=(4,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf13, (4, 4, 4), (16, 4, 1))
        buf14 = buf13
        del buf13
        triton_poi_fused_convolution_relu_8[grid(64)](buf14, primals_9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_9
        buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
        buf16 = buf12
        del buf12
        triton_poi_fused_add_9[grid(4, 16)](buf11, buf15, primals_11,
            buf16, 4, 16, XBLOCK=8, YBLOCK=4, num_warps=1, num_stages=1)
        del primals_11
        buf17 = buf9
        del buf9
        buf18 = buf10
        del buf10
        triton_poi_fused_native_layer_norm_10[grid(16)](buf16, buf17,
            buf18, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf19 = buf15
        del buf15
        triton_poi_fused_native_layer_norm_11[grid(64)](buf16, buf17,
            buf18, primals_12, primals_13, buf19, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf17
        del buf18
        del primals_13
    return (buf19, primals_1, primals_6, primals_8, primals_10, primals_12,
        buf5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8,
        reinterpret_tensor(buf11, (4, 4, 4), (4, 1, 16), 0), buf14, buf16,
        primals_4, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 128),
        reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0),
        reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 64))


class SmootherNew(Module):
    """Convolutional Transformer Encoder Layer"""

    def __init__(self, d_model: 'int', nhead: 'int', d_hid: 'int', dropout=0.1):
        super(SmootherNew, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.conv1 = Conv1d(d_model, d_hid, 9, padding=4)
        self.conv2 = Conv1d(d_hid, d_model, 1, padding=0)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)

    def forward(self, input_0):
        primals_3 = self.self_attn.in_proj_weight
        primals_2 = self.self_attn.in_proj_bias
        primals_4 = self.self_attn.out_proj.weight
        primals_5 = self.self_attn.out_proj.bias
        primals_8 = self.conv1.weight
        primals_6 = self.conv1.bias
        primals_10 = self.conv2.weight
        primals_7 = self.conv2.bias
        primals_9 = self.norm1.weight
        primals_11 = self.norm1.bias
        primals_12 = self.norm2.weight
        primals_13 = self.norm2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
OlegJakushkin/FragmentVC
Smoother
false
14,171
[ "MIT" ]
136
8aa673157b855bf3b67f06fdb6eb4b2a12ed0005
https://github.com/OlegJakushkin/FragmentVC/tree/8aa673157b855bf3b67f06fdb6eb4b2a12ed0005
StridedStyle
import torch
import torch.nn as nn


class NamedTensor(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x


class StridedStyle(nn.ModuleList):

    def __init__(self, n_latents):
        super().__init__([NamedTensor() for _ in range(n_latents)])
        self.n_latents = n_latents

    def forward(self, x):
        styles = [self[i](x[:, i, :]) for i in range(self.n_latents)]
        return torch.stack(styles, dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_latents': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 16
    x0 = xindex % 4
    x2 = xindex // 64
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp9 &
        xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp14 &
        xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr0 + (48 + x0 + 4 * (-12 + x1) + 64 * x2), tmp16 &
        xmask, other=0.0)
    tmp20 = tl.where(tmp14, tmp15, tmp19)
    tmp21 = tl.where(tmp9, tmp10, tmp20)
    tmp22 = tl.where(tmp4, tmp5, tmp21)
    tl.store(out_ptr0 + x3, tmp22, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0),


class NamedTensor(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x


class StridedStyleNew(nn.ModuleList):

    def __init__(self, n_latents):
        super().__init__([NamedTensor() for _ in range(n_latents)])
        self.n_latents = n_latents

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
PeterouZh/GAN2Shape
StridedStyle
false
14,172
[ "MIT" ]
421
ea077e543a3fb824ce06385e8a837dcbae8e9aaa
https://github.com/PeterouZh/GAN2Shape/tree/ea077e543a3fb824ce06385e8a837dcbae8e9aaa
FiLMLayer
import torch
from torch import nn


class FiLMLayer(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.layer = nn.Linear(input_dim, hidden_dim)

    def forward(self, x, freq, phase_shift):
        x = self.layer(x)
        freq = freq.unsqueeze(1).expand_as(x)
        phase_shift = phase_shift.unsqueeze(1).expand_as(x)
        return torch.sin(freq * x + phase_shift)


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'hidden_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_sin_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl_math.sin(tmp4)
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sin_0[grid(64)](primals_4, buf0,
            primals_5, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf1, primals_4, primals_5, reinterpret_tensor(primals_3, (16,
        4), (4, 1), 0), buf0


class FiLMLayerNew(nn.Module):

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.layer = nn.Linear(input_dim, hidden_dim)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.layer.weight
        primals_2 = self.layer.bias
        primals_3 = input_0
        primals_4 = input_1
        primals_5 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
PeterouZh/CIPS-3D
FiLMLayer
false
14,173
[ "MIT" ]
308
9b8bfa0fb23f642af042e150ccd70408f9d137c6
https://github.com/PeterouZh/CIPS-3D/tree/9b8bfa0fb23f642af042e150ccd70408f9d137c6
RKDAngleLoss
import torch
import torch.nn as nn
from torch.nn import functional as F


class RKDAngleLoss(nn.Module):
    """
    Module for calculating RKD Angle Loss
    """

    def forward(self, teacher, student, normalize=True):
        """
        Forward function

        :param teacher (torch.FloatTensor): Prediction made by the teacher model
        :param student (torch.FloatTensor): Prediction made by the student model
        :param normalize (bool): True if inputs need to be normalized
        """
        with torch.no_grad():
            t = teacher.unsqueeze(0) - teacher.unsqueeze(1)
            if normalize:
                t = F.normalize(t, p=2, dim=2)
            t = torch.bmm(t, t.transpose(1, 2)).view(-1)
        s = student.unsqueeze(0) - student.unsqueeze(1)
        if normalize:
            s = F.normalize(s, p=2, dim=2)
        s = torch.bmm(s, s.transpose(1, 2)).view(-1)
        return F.smooth_l1_loss(s, t)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clamp_min_linalg_vector_norm_sub_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = libdevice.sqrt(tmp18)
    tmp20 = 1e-12
    tmp21 = triton_helpers.maximum(tmp19, tmp20)
    tl.store(out_ptr0 + x2, tmp21, xmask)


@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex % 16
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 / tmp3
    tl.store(out_ptr0 + x5, tmp4, xmask)


@triton.jit
def triton_per_fused_smooth_l1_loss_2(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 1.0
    tmp5 = tmp3 < tmp4
    tmp6 = tmp3 * tmp3
    tmp7 = 0.5
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 * tmp4
    tmp10 = tmp3 - tmp7
    tmp11 = tl.where(tmp5, tmp9, tmp10)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp15 = 64.0
    tmp16 = tmp14 / tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_min_linalg_vector_norm_sub_0[grid(16)](arg1_1,
            buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_div_sub_1[grid(64)](arg1_1, buf0, buf1, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf1, reinterpret_tensor(buf1, (4, 4, 4), (16,
            1, 4), 0), out=buf2)
        buf3 = buf0
        del buf0
        triton_poi_fused_clamp_min_linalg_vector_norm_sub_0[grid(16)](arg0_1,
            buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = buf1
        del buf1
        triton_poi_fused_div_sub_1[grid(64)](arg0_1, buf3, buf4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del buf3
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf4, (4, 4, 4), (16,
            1, 4), 0), out=buf5)
        del buf4
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf7 = buf6
        del buf6
        triton_per_fused_smooth_l1_loss_2[grid(1)](buf7, buf2, buf5, 1, 64,
            XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf5
    return buf7,


class RKDAngleLossNew(nn.Module):
    """
    Module for calculating RKD Angle Loss
    """

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PiaCuk/KD_Lib
RKDAngleLoss
false
14,174
[ "MIT" ]
360
153299d484e4c6b33793749709dbb0f33419f190
https://github.com/PiaCuk/KD_Lib/tree/153299d484e4c6b33793749709dbb0f33419f190