Dataset schema (column statistics as reported by the dataset viewer):

| column | dtype | range |
|---|---|---|
| entry_point | string | lengths 1–65 |
| original_triton_python_code | string | lengths 208–619k |
| optimised_triton_code | string | lengths 1.15k–275k |
| repo_name | string | lengths 7–115 |
| module_name | string | lengths 1–65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0–18.5k |
| licenses | list | lengths 1–6 |
| stars | int64 | 0–19.8k |
| sha | string | lengths 40–40 |
| repo_link | string | lengths 72–180 |
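A record therefore pairs an eager PyTorch module (`original_triton_python_code`) with the Triton/Inductor lowering produced for it (`optimised_triton_code`), plus provenance metadata pinned to a commit `sha`. As a minimal sketch of how such a dump is typically consumed with the `datasets` library (the dataset id below is a placeholder, not the real Hub name):

```python
from datasets import load_dataset

# Placeholder dataset id: substitute the actual Hub repository name.
ds = load_dataset("user/pytorch-triton-pairs", split="train")

row = ds[0]
print(row["entry_point"], row["repo_name"], row["sha"])
# Both code columns store complete Python source files as strings.
print(row["original_triton_python_code"][:300])
print(row["optimised_triton_code"][:300])
```

Sample records follow.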
---

entry_point: PKT

original_triton_python_code:

```python
import torch
from torch import nn

class PKT(nn.Module):
    """Probabilistic Knowledge Transfer for deep representation learning
    Code from author: https://github.com/passalis/probabilistic_kt"""

    def __init__(self):
        super(PKT, self).__init__()

    def forward(self, f_s, f_t):
        return self.cosine_similarity_loss(f_s, f_t)

    @staticmethod
    def cosine_similarity_loss(output_net, target_net, eps=1e-07):
        output_net_norm = torch.sqrt(torch.sum(output_net ** 2, dim=1, keepdim=True))
        output_net = output_net / (output_net_norm + eps)
        output_net[output_net != output_net] = 0
        target_net_norm = torch.sqrt(torch.sum(target_net ** 2, dim=1, keepdim=True))
        target_net = target_net / (target_net_norm + eps)
        target_net[target_net != target_net] = 0
        model_similarity = torch.mm(output_net, output_net.transpose(0, 1))
        target_similarity = torch.mm(target_net, target_net.transpose(0, 1))
        model_similarity = (model_similarity + 1.0) / 2.0
        target_similarity = (target_similarity + 1.0) / 2.0
        model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)
        target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)
        loss = torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps)))
        return loss

def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-07
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tmp16 = tmp15 != tmp15
    tmp17 = 0.0
    tmp18 = tl.where(tmp16, tmp17, tmp15)
    tl.store(in_out_ptr0 + x2, tmp18, xmask)

@triton.jit
def triton_per_fused_add_div_log_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    r1 = rindex // 4
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp5 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + r2, None)
    tmp26 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp6 = tmp5 + tmp1
    tmp7 = tmp6 * tmp3
    tmp9 = tmp8 + tmp1
    tmp10 = tmp9 * tmp3
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 + tmp1
    tmp14 = tmp13 * tmp3
    tmp15 = tmp11 + tmp14
    tmp17 = tmp16 + tmp1
    tmp18 = tmp17 * tmp3
    tmp19 = tmp15 + tmp18
    tmp20 = tmp4 / tmp19
    tmp21 = 1e-07
    tmp22 = tmp20 + tmp21
    tmp24 = tmp23 + tmp1
    tmp25 = tmp24 * tmp3
    tmp27 = tmp26 + tmp1
    tmp28 = tmp27 * tmp3
    tmp30 = tmp29 + tmp1
    tmp31 = tmp30 * tmp3
    tmp32 = tmp28 + tmp31
    tmp34 = tmp33 + tmp1
    tmp35 = tmp34 * tmp3
    tmp36 = tmp32 + tmp35
    tmp38 = tmp37 + tmp1
    tmp39 = tmp38 * tmp3
    tmp40 = tmp36 + tmp39
    tmp41 = tmp25 / tmp40
    tmp42 = tmp41 + tmp21
    tmp43 = tmp22 / tmp42
    tmp44 = tl_math.log(tmp43)
    tmp45 = tmp20 * tmp44
    tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
    tmp48 = tl.sum(tmp46, 1)[:, None]
    tmp49 = 16.0
    tmp50 = tmp48 / tmp49
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp50, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)](buf1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
        buf4 = buf1
        del buf1
        buf5 = buf4
        del buf4
        triton_poi_fused_add_div_index_put_lift_fresh_pow_sqrt_sum_0[grid(16)](buf5, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), out=buf6)
        del buf5
        buf7 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf7
        del buf7
        triton_per_fused_add_div_log_mean_mul_sum_1[grid(1)](buf8, buf2, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf6
    return buf8,

class PKTNew(nn.Module):
    """Probabilistic Knowledge Transfer for deep representation learning
    Code from author: https://github.com/passalis/probabilistic_kt"""

    def __init__(self):
        super(PKTNew, self).__init__()

    @staticmethod
    def cosine_similarity_loss(output_net, target_net, eps=1e-07):
        output_net_norm = torch.sqrt(torch.sum(output_net ** 2, dim=1, keepdim=True))
        output_net = output_net / (output_net_norm + eps)
        output_net[output_net != output_net] = 0
        target_net_norm = torch.sqrt(torch.sum(target_net ** 2, dim=1, keepdim=True))
        target_net = target_net / (target_net_norm + eps)
        target_net[target_net != target_net] = 0
        model_similarity = torch.mm(output_net, output_net.transpose(0, 1))
        target_similarity = torch.mm(target_net, target_net.transpose(0, 1))
        model_similarity = (model_similarity + 1.0) / 2.0
        target_similarity = (target_similarity + 1.0) / 2.0
        model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)
        target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)
        loss = torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps)))
        return loss

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: kctsiolis/RepDistiller
module_name: PKT
synthetic: false
uuid: 3932
licenses: ["BSD-2-Clause"]
stars: 0
sha: ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
repo_link: https://github.com/kctsiolis/RepDistiller/tree/ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
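Since the compiled module (`PKTNew`) keeps the same constructor and forward interface as the eager one, a record can be sanity-checked by executing both code fields and comparing outputs on the inputs that `get_inputs()` supplies. A minimal sketch, assuming a CUDA device is available (the generated `call()` hard-codes device 0) and that both sources of this record have been exec'd or imported into the current namespace:

```python
import torch

# PKT, PKTNew and get_inputs come from the two code fields of this record.
inputs = [t.cuda() for t in get_inputs()]

ref = PKT()(*inputs)      # eager PyTorch loss
opt = PKTNew()(*inputs)   # Inductor-generated Triton version

# The lowering should reproduce the eager result up to float32 tolerance.
assert torch.allclose(ref, opt, atol=1e-5), (ref.item(), opt.item())
```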
---

entry_point: StyleLoss

original_triton_python_code:

```python
import torch
import torch.nn as nn

class StyleLoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1loss = nn.L1Loss()

    def gram(self, feature):
        N, C, H, W = feature.shape
        feature = feature.view(N, C, H * W)
        gram_mat = torch.bmm(feature, torch.transpose(feature, 1, 2))
        return gram_mat / (C * H * W)

    def forward(self, results, targets):
        loss = 0.0
        for i, (ress, tars) in enumerate(zip(results, targets)):
            loss += self.l1loss(self.gram(ress), self.gram(tars))
        return loss / len(results)

def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4]), torch.rand([4, 4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp10 = tl.load(in_ptr2 + r0, None)
    tmp12 = tl.load(in_ptr3 + r0, None)
    tmp19 = tl.load(in_ptr4 + r0, None)
    tmp21 = tl.load(in_ptr5 + r0, None)
    tmp28 = tl.load(in_ptr6 + r0, None)
    tmp30 = tl.load(in_ptr7 + r0, None)
    tmp1 = 0.015625
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp11 - tmp13
    tmp15 = tl_math.abs(tmp14)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp20 = tmp19 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp20 - tmp22
    tmp24 = tl_math.abs(tmp23)
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp29 = tmp28 * tmp1
    tmp31 = tmp30 * tmp1
    tmp32 = tmp29 - tmp31
    tmp33 = tl_math.abs(tmp32)
    tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
    tmp36 = tl.sum(tmp34, 1)[:, None]
    tmp37 = 64.0
    tmp38 = tmp9 / tmp37
    tmp39 = 0.0
    tmp40 = tmp38 + tmp39
    tmp41 = tmp18 / tmp37
    tmp42 = tmp40 + tmp41
    tmp43 = tmp27 / tmp37
    tmp44 = tmp42 + tmp43
    tmp45 = tmp36 / tmp37
    tmp46 = tmp44 + tmp45
    tmp47 = 0.25
    tmp48 = tmp46 * tmp47
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf1)
        buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 4, 16), (64, 16, 1), 768), reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 768), out=buf10)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 256), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 256), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 4, 16), (64, 16, 1), 256), reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 256), out=buf4)
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 512), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 512), out=buf6)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 4, 16), (64, 16, 1), 512), reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 512), out=buf7)
        del arg1_1
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 768), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 768), out=buf9)
        del arg0_1
        buf11 = empty_strided_cuda((), (), torch.float32)
        buf12 = buf11
        del buf11
        get_raw_stream(0)
        triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf12, buf0, buf1, buf3, buf4, buf6, buf7, buf9, buf10, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf10
        del buf3
        del buf4
        del buf6
        del buf7
        del buf9
    return buf12,

class StyleLossNew(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1loss = nn.L1Loss()

    def gram(self, feature):
        N, C, H, W = feature.shape
        feature = feature.view(N, C, H * W)
        gram_mat = torch.bmm(feature, torch.transpose(feature, 1, 2))
        return gram_mat / (C * H * W)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: ljrprocc/Motif-Removal
module_name: StyleLoss
synthetic: false
uuid: 3933
licenses: ["MIT"]
stars: 0
sha: 8979ca91398212248a2be61345c99bdec53ae37e
repo_link: https://github.com/ljrprocc/Motif-Removal/tree/8979ca91398212248a2be61345c99bdec53ae37e
---

entry_point: PerceptionLoss

original_triton_python_code:

```python
import torch
import torch.nn as nn

class PerceptionLoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1loss = nn.L1Loss()

    def forward(self, results, targets):
        loss = 0.0
        for i, (ress, tars) in enumerate(zip(results, targets)):
            loss += self.l1loss(ress, tars)
        return loss / len(results)

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_abs_add_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp7 = tl.load(in_ptr0 + (64 + r0), None)
    tmp8 = tl.load(in_ptr1 + (64 + r0), None)
    tmp14 = tl.load(in_ptr0 + (128 + r0), None)
    tmp15 = tl.load(in_ptr1 + (128 + r0), None)
    tmp21 = tl.load(in_ptr0 + (192 + r0), None)
    tmp22 = tl.load(in_ptr1 + (192 + r0), None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.sum(tmp4, 1)[:, None]
    tmp9 = tmp7 - tmp8
    tmp10 = tl_math.abs(tmp9)
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tmp16 = tmp14 - tmp15
    tmp17 = tl_math.abs(tmp16)
    tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp23 = tmp21 - tmp22
    tmp24 = tl_math.abs(tmp23)
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp28 = 64.0
    tmp29 = tmp6 / tmp28
    tmp30 = 0.0
    tmp31 = tmp29 + tmp30
    tmp32 = tmp13 / tmp28
    tmp33 = tmp31 + tmp32
    tmp34 = tmp20 / tmp28
    tmp35 = tmp33 + tmp34
    tmp36 = tmp27 / tmp28
    tmp37 = tmp35 + tmp36
    tmp38 = 0.25
    tmp39 = tmp37 * tmp38
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp39, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_add_div_mean_sub_0[grid(1)](buf4, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf4,

class PerceptionLossNew(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1loss = nn.L1Loss()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: ljrprocc/Motif-Removal
module_name: PerceptionLoss
synthetic: false
uuid: 3934
licenses: ["MIT"]
stars: 0
sha: 8979ca91398212248a2be61345c99bdec53ae37e
repo_link: https://github.com/ljrprocc/Motif-Removal/tree/8979ca91398212248a2be61345c99bdec53ae37e
---

entry_point: lp_L1_Loss

original_triton_python_code:

```python
import torch
from torch.utils.data import *
import torch.nn as nn

class lp_L1_Loss(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.L1Loss(reduction='sum')

    def forward(self, x, y):
        b = x.shape[0]
        loss = self.loss(x, y)
        return loss / b

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.utils.data import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_abs_div_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_div_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,

class lp_L1_LossNew(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.L1Loss(reduction='sum')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: loveorchids/local_patch_retrieval
module_name: lp_L1_Loss
synthetic: false
uuid: 3935
licenses: ["Apache-2.0"]
stars: 0
sha: 52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
repo_link: https://github.com/loveorchids/local_patch_retrieval/tree/52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
---

entry_point: VariableSoftmax

original_triton_python_code:

```python
import torch
from torch import Tensor
from torch import nn
from typing import *

class VariableSoftmax(nn.Softmax):
    """Softmax with temperature"""

    def __init__(self, temp: 'float'=1, dim: 'int'=-1):
        super().__init__(dim=dim)
        self.temp = temp

    def forward(self, x: 'Tensor') ->Tensor:
        return super().forward(x / self.temp)

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from typing import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tmp14 * tmp1
    tmp16 = tl_math.exp(tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)

@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)

def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
    return buf1,

class VariableSoftmaxNew(nn.Softmax):
    """Softmax with temperature"""

    def __init__(self, temp: 'float'=1, dim: 'int'=-1):
        super().__init__(dim=dim)
        self.temp = temp

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```

repo_name: llayer/pytorch_inferno
module_name: VariableSoftmax
synthetic: false
uuid: 3936
licenses: ["Apache-2.0"]
stars: 0
sha: 922eba5e04e447126506512eb82adcd9ed1dab25
repo_link: https://github.com/llayer/pytorch_inferno/tree/922eba5e04e447126506512eb82adcd9ed1dab25
---

entry_point: lp_L2_Loss

original_triton_python_code:

```python
import torch
from torch.utils.data import *
import torch.nn as nn

class lp_L2_Loss(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.MSELoss(reduction='sum')

    def forward(self, x, y):
        b = x.shape[0]
        loss = self.loss(x, y)
        return loss / b

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.utils.data import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_mse_loss_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,

class lp_L2_LossNew(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.MSELoss(reduction='sum')

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: loveorchids/local_patch_retrieval
module_name: lp_L2_Loss
synthetic: false
uuid: 3937
licenses: ["Apache-2.0"]
stars: 0
sha: 52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
repo_link: https://github.com/loveorchids/local_patch_retrieval/tree/52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
---

entry_point: lp_KL_divergence

original_triton_python_code:

```python
import torch
from torch.utils.data import *
import torch.nn as nn

class lp_KL_divergence(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.KLDivLoss(reduction='batchmean')
        self.normalize = nn.Softmax(dim=-1)

    def forward(self, x, y):
        embed_dim = x.shape[-1]
        x = x.view(-1, embed_dim)
        y = y.view(-1, embed_dim)
        x = self.normalize(x)
        y = self.normalize(y)
        loss = self.loss(x, y)
        return loss

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.utils.data import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)

@triton.jit
def triton_red_fused__softmax_div_mul_sub_sum_xlogy_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    rnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rbase = tl.arange(0, RBLOCK)[None, :]
    _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        r1 = rindex // 4
        tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp4 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp6 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp17 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0)
        tmp18 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy='evict_last', other=0.0)
        tmp19 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp21 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp23 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tmp1 + tmp2
        tmp5 = tmp3 + tmp4
        tmp7 = tmp5 + tmp6
        tmp8 = tmp0 / tmp7
        tmp9 = libdevice.isnan(tmp8).to(tl.int1)
        tmp10 = 0.0
        tmp11 = tmp8 == tmp10
        tmp12 = tl_math.log(tmp8)
        tmp13 = tmp8 * tmp12
        tmp14 = tl.where(tmp11, tmp10, tmp13)
        tmp15 = float('nan')
        tmp16 = tl.where(tmp9, tmp15, tmp14)
        tmp20 = tmp18 + tmp19
        tmp22 = tmp20 + tmp21
        tmp24 = tmp22 + tmp23
        tmp25 = tmp17 / tmp24
        tmp26 = tmp8 * tmp25
        tmp27 = tmp16 - tmp26
        tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
        tmp30 = _tmp29 + tmp28
        _tmp29 = tl.where(rmask, tmp30, _tmp29)
    tmp29 = tl.sum(_tmp29, 1)[:, None]
    tmp31 = 0.015625
    tmp32 = tmp29 * tmp31
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf3
        del buf3
        triton_red_fused__softmax_div_mul_sub_sum_xlogy_1[grid(1)](buf4, buf0, buf2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1)
        del buf0
        del buf2
    return buf4,

class lp_KL_divergenceNew(nn.Module):
    def __init__(self):
        super().__init__()
        self.loss = nn.KLDivLoss(reduction='batchmean')
        self.normalize = nn.Softmax(dim=-1)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: loveorchids/local_patch_retrieval
module_name: lp_KL_divergence
synthetic: false
uuid: 3938
licenses: ["Apache-2.0"]
stars: 0
sha: 52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
repo_link: https://github.com/loveorchids/local_patch_retrieval/tree/52b2e8fdac965d56ef9f89a8c4de96d0b41d3981
---

entry_point: GraphConvSparse

original_triton_python_code:

```python
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn

def glorot_init(input_dim, output_dim):
    init_range = np.sqrt(6.0 / (input_dim + output_dim))
    initial = torch.rand(input_dim, output_dim) * 2 * init_range - init_range
    return nn.Parameter(initial)

class GraphConvSparse(nn.Module):
    def __init__(self, input_dim, output_dim, activation=F.relu, **kwargs):
        super(GraphConvSparse, self).__init__(**kwargs)
        self.weight = glorot_init(input_dim, output_dim)
        self.activation = activation

    def forward(self, features, adj):
        _b, _n, _d = features.shape
        x = features
        x = torch.einsum('bnd,df->bnf', (x, self.weight))
        x = torch.bmm(adj, x)
        outputs = self.activation(x)
        return outputs

def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]

def get_init_inputs():
    return [[], {'input_dim': 4, 'output_dim': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn.functional as F
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 16, 4), (64, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_1, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf2, buf3, reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (1, 4, 16), (64, 1, 4), 0)

def glorot_init(input_dim, output_dim):
    init_range = np.sqrt(6.0 / (input_dim + output_dim))
    initial = torch.rand(input_dim, output_dim) * 2 * init_range - init_range
    return nn.Parameter(initial)

class GraphConvSparseNew(nn.Module):
    def __init__(self, input_dim, output_dim, activation=F.relu, **kwargs):
        super(GraphConvSparseNew, self).__init__(**kwargs)
        self.weight = glorot_init(input_dim, output_dim)
        self.activation = activation

    def forward(self, input_0, input_1):
        primals_2 = self.weight
        primals_1 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```

repo_name: ksuchoi216/learn-to-cluster
module_name: GraphConvSparse
synthetic: false
uuid: 3939
licenses: ["MIT"]
stars: 0
sha: bef44f92be14e00a96545061a5ecfa7a27da267e
repo_link: https://github.com/ksuchoi216/learn-to-cluster/tree/bef44f92be14e00a96545061a5ecfa7a27da267e
---

entry_point: resnet_block

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class resnet_block(nn.Module):
    def __init__(self, dim_in, dim_out):
        super(resnet_block, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        if self.dim_in == self.dim_out:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
        else:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)
            self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
            nn.init.xavier_uniform_(self.conv_s.weight)

    def forward(self, input, is_training=False):
        if self.dim_in == self.dim_out:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            output = output + input
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        else:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            input_ = self.conv_s(input)
            output = output + input_
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        return output

def get_inputs():
    return [torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(in_out_ptr0 + x0, tmp5, xmask)

@triton.jit
def triton_poi_fused_add_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(in_out_ptr0 + x0, tmp7, xmask)
    tl.store(out_ptr0 + x0, tmp8, xmask)

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_leaky_relu_leaky_relu_backward_1[grid(256)](buf3, primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
    return buf3, primals_1, primals_2, primals_3, buf1, buf4

class resnet_blockNew(nn.Module):
    def __init__(self, dim_in, dim_out):
        super(resnet_blockNew, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        if self.dim_in == self.dim_out:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
        else:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False)
            self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
            nn.init.xavier_uniform_(self.conv_s.weight)

    def forward(self, input_0):
        primals_1 = self.conv_1.weight
        primals_3 = self.conv_2.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```

repo_name: luixiao1223/BSP-NET-pytorch
module_name: resnet_block
synthetic: false
uuid: 3940
licenses: ["MIT"]
stars: 0
sha: f871c8ce6a9d52ac922e110702c47cd1c89d0a73
repo_link: https://github.com/luixiao1223/BSP-NET-pytorch/tree/f871c8ce6a9d52ac922e110702c47cd1c89d0a73
---

entry_point: DiceLoss

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BinaryDiceLoss(nn.Module):
    """Dice loss of binary class
    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1
        p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict
        reduction: Reduction method to apply, return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
    Returns:
        Loss tensor according to arg reduction
    Raise:
        Exception if unexpected reduction
    """

    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        predict = predict.contiguous().view(predict.shape[0], -1)
        target = target.contiguous().view(target.shape[0], -1)
        num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
        den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
        loss = 1 - num / den
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        elif self.reduction == 'none':
            return loss
        else:
            raise Exception('Unexpected reduction {}'.format(self.reduction))

class DiceLoss(nn.Module):
    """Dice loss, need one hot encode input
    Args:
        weight: An array of shape [num_classes,]
        ignore_index: class index to ignore
        predict: A tensor of shape [N, C, *]
        target: A tensor of same shape with predict
        other args pass to BinaryDiceLoss
    Return:
        same as BinaryDiceLoss
    """

    def __init__(self, weight=None, ignore_index=None, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, predict, target):
        assert predict.shape == target.shape, 'predict & target shape do not match'
        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        predict = F.softmax(predict, dim=1)
        for i in range(target.shape[1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[:, i])
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], 'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    dice_loss *= self.weights[i]
                total_loss += dice_loss
        return total_loss / target.shape[1]

def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]

def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda

@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)

@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)

@triton.jit
def triton_per_fused_add_mul_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = tmp0 * tmp0
    tmp8 = tmp1 * tmp1
    tmp9 = tmp7 + tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)

@triton.jit
def triton_per_fused_add_mul_pow_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = tmp0 * tmp0
    tmp8 = tmp1 * tmp1
    tmp9 = tmp7 + tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)

@triton.jit
def triton_per_fused_add_mul_pow_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = tmp0 * tmp0
    tmp8 = tmp1 * tmp1
    tmp9 = tmp7 + tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)

@triton.jit
def triton_per_fused_add_mul_pow_sum_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = tmp0 * tmp0
    tmp8 = tmp1 * tmp1
    tmp9 = tmp7 + tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)

@triton.jit
def triton_per_fused_add_div_mean_rsub_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp10 = tl.load(in_ptr2 + r0, None)
    tmp12 = tl.load(in_ptr3 + r0, None)
    tmp19 = tl.load(in_ptr4 + r0, None)
    tmp21 = tl.load(in_ptr5 + r0, None)
    tmp28 = tl.load(in_ptr6 + r0, None)
    tmp30 = tl.load(in_ptr7 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 / tmp4
    tmp6 = tmp1 - tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 + tmp1
    tmp13 = tmp12 + tmp1
    tmp14 = tmp11 / tmp13
    tmp15 = tmp1 - tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tmp20 = tmp19 + tmp1
    tmp22 = tmp21 + tmp1
    tmp23 = tmp20 / tmp22
    tmp24 = tmp1 - tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp29 = tmp28 + tmp1
    tmp31 = tmp30 + tmp1
    tmp32 = tmp29 / tmp31
    tmp33 = tmp1 - tmp32
    tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
    tmp36 = tl.sum(tmp34, 1)[:, None]
    tmp37 = 4.0
    tmp38 = tmp9 / tmp37
    tmp39 = 0.0
    tmp40 = tmp38 + tmp39
    tmp41 = tmp18 / tmp37
    tmp42 = tmp40 + tmp41
    tmp43 = tmp27 / tmp37
    tmp44 = tmp42 + tmp43
    tmp45 = tmp36 / tmp37
    tmp46 = tmp44 + tmp45
    tmp47 = 0.25
    tmp48 = tmp46 * tmp47
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None)

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        buf3 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_add_mul_pow_sum_2[grid(4)](buf1, arg1_1, buf2, buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf11 = empty_strided_cuda((4,), (1,), torch.float32)
        buf12 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_add_mul_pow_sum_3[grid(4)](buf1, arg1_1, buf11, buf12, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf5 = empty_strided_cuda((4,), (1,), torch.float32)
        buf6 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_add_mul_pow_sum_4[grid(4)](buf1, arg1_1, buf5, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf8 = empty_strided_cuda((4,), (1,), torch.float32)
        buf9 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_per_fused_add_mul_pow_sum_5[grid(4)](buf1, arg1_1, buf8, buf9, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg1_1
        del buf1
        buf10 = empty_strided_cuda((), (), torch.float32)
        buf14 = buf10
        del buf10
        triton_per_fused_add_div_mean_rsub_6[grid(1)](buf14, buf2, buf3, buf5, buf6, buf8, buf9, buf11, buf12, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf11
        del buf12
        del buf2
        del buf3
        del buf5
        del buf6
        del buf8
        del buf9
    return buf14,

class BinaryDiceLoss(nn.Module):
    """Dice loss of binary class
    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1
        p: Denominator value: \\sum{x^p} + \\sum{y^p}, default: 2
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict
        reduction: Reduction method to apply, return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
    Returns:
        Loss tensor according to arg reduction
    Raise:
        Exception if unexpected reduction
    """

    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        predict = predict.contiguous().view(predict.shape[0], -1)
        target = target.contiguous().view(target.shape[0], -1)
        num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
        den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
        loss = 1 - num / den
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        elif self.reduction == 'none':
            return loss
        else:
            raise Exception('Unexpected reduction {}'.format(self.reduction))

class DiceLossNew(nn.Module):
    """Dice loss, need one hot encode input
    Args:
        weight: An array of shape [num_classes,]
        ignore_index: class index to ignore
        predict: A tensor of shape [N, C, *]
        target: A tensor of same shape with predict
        other args pass to BinaryDiceLoss
    Return:
        same as BinaryDiceLoss
    """

    def __init__(self, weight=None, ignore_index=None, **kwargs):
        super(DiceLossNew, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
```

repo_name: ljrprocc/Motif-Removal
module_name: DiceLoss
synthetic: false
uuid: 3941
licenses: ["MIT"]
stars: 0
sha: 8979ca91398212248a2be61345c99bdec53ae37e
repo_link: https://github.com/ljrprocc/Motif-Removal/tree/8979ca91398212248a2be61345c99bdec53ae37e
SoftmaxLayer
import torch import torch.nn as nn class SoftmaxLayer(nn.Module): """ Naive softmax-layer """ def __init__(self, output_dim, n_class): """ :param output_dim: int :param n_class: int """ super(SoftmaxLayer, self).__init__() self.hidden2tag = nn.Linear(output_dim, n_class) self.criterion = nn.CrossEntropyLoss(size_average=False) def forward(self, x, y): """ :param x: torch.Tensor :param y: torch.Tensor :return: """ tag_scores = self.hidden2tag(x) return self.criterion(tag_scores, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'output_dim': 4, 'n_class': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mul_neg_sum_1[grid(1)](buf3, buf1, primals_4, 1, 256, num_warps=2, num_stages=1) del buf1 
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class SoftmaxLayerNew(nn.Module): """ Naive softmax-layer """ def __init__(self, output_dim, n_class): """ :param output_dim: int :param n_class: int """ super(SoftmaxLayerNew, self).__init__() self.hidden2tag = nn.Linear(output_dim, n_class) self.criterion = nn.CrossEntropyLoss(size_average=False) def forward(self, input_0, input_1): primals_1 = self.hidden2tag.weight primals_2 = self.hidden2tag.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
luomou97/ELMoForManyLangs
SoftmaxLayer
false
3,942
[ "MIT" ]
0
3e97600baa3a4dde229c1e78c513785e7d50e8e1
https://github.com/luomou97/ELMoForManyLangs/tree/3e97600baa3a4dde229c1e78c513785e7d50e8e1
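The two fused kernels in this record implement a numerically stable soft-target cross entropy: triton_poi_fused__log_softmax_0 subtracts the per-row maximum over dim 1, and the reduction kernel finishes the log-sum-exp before the multiply-by-target, sum, and negation. A minimal CPU sketch of that decomposition (shapes follow the record's inputs; the tensor names here are illustrative, not from the source):

import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4)                 # output of the linear layer, viewed 4D
targets = torch.rand(4, 4, 4, 4)                # soft targets
m = logits.max(dim=1, keepdim=True).values      # kernel 0: subtract the per-row max
shifted = logits - m
log_p = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 1: log-sum-exp
loss = -(log_p * targets).sum()                 # weighted sum, then negate
assert torch.allclose(log_p, F.log_softmax(logits, dim=1), atol=1e-6)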
SELU
import torch from torch import nn import torch.nn.functional as F def where(condition, if_true, if_false): """ Torch equivalent of numpy.where. Parameters ---------- condition : torch.ByteTensor or torch.cuda.ByteTensor Condition to check. if_true : torch.Tensor or torch.cuda.Tensor Output value if condition is true. if_false: torch.Tensor or torch.cuda.Tensor Output value if condition is false Returns ------- torch.Tensor Raises ------ AssertionError if if_true and if_false don't have the same datatype. """ assert if_true.type() == if_false.type( ), 'Type mismatch: {} and {}'.format(if_true.data.type(), if_false. data.type()) casted_condition = condition.type_as(if_true) output = casted_condition * if_true + (1 - casted_condition) * if_false return output class SELU(nn.Module): def forward(self, input): return self.selu(input) @staticmethod def selu(x): alpha = 1.6732632423543772 scale = 1.0507009873554805 return scale * where(x >= 0, x, alpha * F.elu(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 >= tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tmp3 * tmp0 tmp5 = 1.0 tmp6 = tmp5 - tmp3 tmp7 = tmp0 > tmp1 tmp8 = tmp0 * tmp5 tmp9 = libdevice.expm1(tmp8) tmp10 = tmp9 * tmp5 tmp11 = tl.where(tmp7, tmp8, tmp10) tmp12 = 1.6732632423543772 tmp13 = tmp11 * tmp12 tmp14 = tmp6 * tmp13 tmp15 = tmp4 + tmp14 tmp16 = 1.0507009873554805 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def where(condition, if_true, if_false): """ Torch equivalent of numpy.where. Parameters ---------- condition : torch.ByteTensor or torch.cuda.ByteTensor Condition to check. if_true : torch.Tensor or torch.cuda.Tensor Output value if condition is true. if_false: torch.Tensor or torch.cuda.Tensor Output value if condition is false Returns ------- torch.Tensor Raises ------ AssertionError if if_true and if_false don't have the same datatype. """ assert if_true.type() == if_false.type( ), 'Type mismatch: {} and {}'.format(if_true.data.type(), if_false. data.type()) casted_condition = condition.type_as(if_true) output = casted_condition * if_true + (1 - casted_condition) * if_false return output class SELUNew(nn.Module): @staticmethod def selu(x): alpha = 1.6732632423543772 scale = 1.0507009873554805 return scale * where(x >= 0, x, alpha * F.elu(x)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
krayyalasomayajula/inferno
SELU
false
3,943
[ "Apache-2.0" ]
0
1c56f34ff19c69dec3d3cb6287b659345bce3492
https://github.com/krayyalasomayajula/inferno/tree/1c56f34ff19c69dec3d3cb6287b659345bce3492
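The fused kernel folds both branches of where together with the ELU expansion (libdevice.expm1) and the final scale into a single elementwise pass. Since alpha and scale here are the standard SELU constants, the eager module should match PyTorch's built-in SELU; a quick CPU check (a sketch, assuming the SELU class from this record is in scope):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(SELU.selu(x), F.selu(x), atol=1e-6)  # identical alpha and scale constants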
Highway
import torch from torch import nn import torch.nn.functional as F class NonCausalConv1d(nn.Module): """Non causal Conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() padding = (kernel_size - 1) * dilation // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ return self.conv(x) class CausalConv1d(nn.Module): """Causal conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() self.padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, self.padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ x = self.conv(x) if self.padding > 0: return x[:, :, :-self.padding].contiguous() else: return x class Highway(nn.Module): """Highway network with conv1d """ def __init__(self, hidden, kernel_size, dilation, causal=False): self.d = hidden super().__init__() if causal: self.conv = CausalConv1d(hidden, 2 * hidden, kernel_size, dilation) else: self.conv = NonCausalConv1d(hidden, 2 * hidden, kernel_size, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ Hout = self.conv(x) H1 = Hout[:, :self.d, :] H2 = Hout[:, self.d:, :] return F.sigmoid(H1) * H2 + (1 - F.sigmoid(H1)) * x def get_inputs(): return [torch.rand([4, 4, 2])] def get_init_inputs(): return [[], {'hidden': 4, 'kernel_size': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 4 x2 = xindex // 8 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x1 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + x3, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 1), (8, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(32)](buf1, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) return buf2, primals_1, primals_3, buf1 class NonCausalConv1d(nn.Module): """Non causal Conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() padding = (kernel_size - 1) * dilation // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ return self.conv(x) class CausalConv1d(nn.Module): """Causal conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. 
""" def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() self.padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, self.padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ x = self.conv(x) if self.padding > 0: return x[:, :, :-self.padding].contiguous() else: return x class HighwayNew(nn.Module): """Highway network with conv1d """ def __init__(self, hidden, kernel_size, dilation, causal=False): self.d = hidden super().__init__() if causal: self.conv = CausalConv1d(hidden, 2 * hidden, kernel_size, dilation) else: self.conv = NonCausalConv1d(hidden, 2 * hidden, kernel_size, dilation) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lstsm12345/DCTTS-PyTorch
Highway
false
3,944
[ "MIT" ]
0
d44b9407b654abc2069bd2a7ef6231572ace1fa7
https://github.com/lstsm12345/DCTTS-PyTorch/tree/d44b9407b654abc2069bd2a7ef6231572ace1fa7
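triton_poi_fused_add_mul_rsub_sigmoid_1 evaluates the highway gate sigmoid(H1) * H2 + (1 - sigmoid(H1)) * x in one pass. One subtlety visible in the trace: with the record's even kernel size 4 and padding (4 - 1) * 1 // 2 = 1, the convolution output has length 1 (see assert_size_stride(buf0, (4, 8, 1), ...) above), so the gate broadcasts along the length axis; the docstring's same-length guarantee strictly holds only for odd kernel sizes. A shape sketch, assuming the eager Highway class from this record is in scope:

import torch

hw = Highway(hidden=4, kernel_size=4, dilation=1)  # values from get_init_inputs()
x = torch.rand(4, 4, 2)                            # (batch, channels, seq_len)
print(hw(x).shape)                                 # torch.Size([4, 4, 2]), via broadcasting against x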
generator
import torch import torch.nn as nn class generator(nn.Module): def __init__(self, p_dim, c_dim): super(generator, self).__init__() self.p_dim = p_dim self.c_dim = c_dim convex_layer_weights = torch.zeros((self.p_dim, self.c_dim)) self.convex_layer_weights = nn.Parameter(convex_layer_weights) nn.init.normal_(self.convex_layer_weights, mean=0.0, std=0.02) def forward(self, points, plane_m, is_training=False): h1 = torch.matmul(points, plane_m) h1 = torch.clamp(h1, min=0) h2 = torch.matmul(h1, (self.convex_layer_weights > 0.01).float()) h3 = torch.min(h2, dim=2, keepdim=True)[0] return h2, h3 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'p_dim': 4, 'c_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.01 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_min_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.minimum(tmp0, tmp1) tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp6 = triton_helpers.minimum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](arg2_1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.fallback_mixed_mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, out=buf3) del buf1 del buf2 buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_min_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4 class generatorNew(nn.Module): def __init__(self, p_dim, c_dim): super(generatorNew, self).__init__() self.p_dim = p_dim self.c_dim = c_dim convex_layer_weights = torch.zeros((self.p_dim, self.c_dim)) self.convex_layer_weights = nn.Parameter(convex_layer_weights) nn.init.normal_(self.convex_layer_weights, mean=0.0, std=0.02) def forward(self, input_0, input_1): arg2_1 = self.convex_layer_weights arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
luixiao1223/BSP-NET-pytorch
generator
false
3,945
[ "MIT" ]
0
f871c8ce6a9d52ac922e110702c47cd1c89d0a73
https://github.com/luixiao1223/BSP-NET-pytorch/tree/f871c8ce6a9d52ac922e110702c47cd1c89d0a73
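The record compiles BSP-Net's three-stage pipeline: clamp(points @ plane_m, min=0), a mixed-dtype matmul against the binarized mask (convex_layer_weights > 0.01) via fallback_mixed_mm, and a min over dim 2 that triton_poi_fused_min_2 unrolls for the four elements. A shape sketch, assuming the eager generator class from this record is in scope:

import torch

g = generator(p_dim=4, c_dim=4)   # values from get_init_inputs()
points, planes = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
h2, h3 = g(points, planes)
print(h2.shape, h3.shape)         # torch.Size([4, 4, 4, 4]) torch.Size([4, 4, 1, 4])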
RelationNonLocal
import torch import torch.nn as nn class RelationNonLocal(nn.Module): def __init__(self, C): super(RelationNonLocal, self).__init__() self.conv_fv = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fk = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fq = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fr = nn.Conv2d(C, C, kernel_size=1, stride=1) def forward(self, input_): N, C, H, W = input_.shape f_v = self.conv_fv(input_) f_k = self.conv_fk(input_) f_q = self.conv_fq(input_) f_k = f_k.reshape([N, C, H * W]).permute(0, 2, 1) f_q = f_q.reshape([N, C, H * W]) w = torch.matmul(f_k, f_q) / (H * W) f_r = torch.matmul(w.permute(0, 2, 1), f_v.reshape([N, C, H * W]). permute(0, 2, 1)).permute(0, 2, 1) f_r = f_r.reshape(N, C, H, W) f_r = self.conv_fr(f_r) return f_r def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'C': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.0625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5) buf6 = buf0 del buf0 triton_poi_fused_convolution_0[grid(256)](buf6, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del 
primals_3 buf7 = buf5 del buf5 triton_poi_fused_div_1[grid(1024)](buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0), out=buf8) buf9 = extern_kernels.convolution(reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 1, 16, 4), 0), primals_8, stride=(1, 1), padding=(0, 0 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4)) buf10 = buf9 del buf9 triton_poi_fused_convolution_2[grid(256)](buf10, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return (buf10, primals_1, primals_2, primals_4, primals_6, primals_8, reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf7, reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0)) class RelationNonLocalNew(nn.Module): def __init__(self, C): super(RelationNonLocalNew, self).__init__() self.conv_fv = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fk = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fq = nn.Conv2d(C, C, kernel_size=1, stride=1) self.conv_fr = nn.Conv2d(C, C, kernel_size=1, stride=1) def forward(self, input_0): primals_2 = self.conv_fv.weight primals_3 = self.conv_fv.bias primals_4 = self.conv_fk.weight primals_5 = self.conv_fk.bias primals_6 = self.conv_fq.weight primals_7 = self.conv_fq.bias primals_8 = self.conv_fr.weight primals_9 = self.conv_fr.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
luozn15/FloorplanGAN
RelationNonLocal
false
3,946
[ "MIT" ]
0
113813c2e857c5cd4e64c92626d359e5746e9eab
https://github.com/luozn15/FloorplanGAN/tree/113813c2e857c5cd4e64c92626d359e5746e9eab
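The 0.0625 in triton_poi_fused_div_1 is the 1/(H*W) = 1/16 normalization of the (N, HW, HW) affinity matrix w = f_k^T f_q / (HW); the rest is the standard non-local block, with the 1x1 convolutions left to the extern convolution kernel and only the bias adds and the scaling fused. A shape sketch, assuming the eager RelationNonLocal class from this record is in scope:

import torch

block = RelationNonLocal(C=4)
x = torch.rand(4, 4, 4, 4)   # (N, C, H, W)
print(block(x).shape)        # torch.Size([4, 4, 4, 4]), same shape as the input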
RegularizedLinear
import torch from torch import nn class RegularizedLinear(nn.Linear): def __init__(self, *args, ar_weight=0.001, l1_weight=0.001, **kwargs): super(RegularizedLinear, self).__init__(*args, **kwargs) self.ar_weight = ar_weight self.l1_weight = l1_weight self._losses = {} def forward(self, input): output = super(RegularizedLinear, self).forward(input) self._losses['activity_regularization'] = (output * output).sum( ) * self.ar_weight self._losses['l1_weight_regularization'] = torch.abs(self.weight).sum( ) * self.l1_weight return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp5 = 0.001 tmp6 = tmp4 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_per_fused_abs_mul_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 0.001 tmp6 = tmp4 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_mul_sum_0[grid(1)](buf3, buf0, 1, 256, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_per_fused_abs_mul_sum_1[grid(1)](buf4, primals_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf3, buf4, primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) class RegularizedLinearNew(nn.Linear): def __init__(self, *args, ar_weight=0.001, l1_weight=0.001, **kwargs): super(RegularizedLinearNew, self).__init__(*args, **kwargs) self.ar_weight = ar_weight self.l1_weight = l1_weight self._losses = {} def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
krayyalasomayajula/inferno
RegularizedLinear
false
3,947
[ "Apache-2.0" ]
0
1c56f34ff19c69dec3d3cb6287b659345bce3492
https://github.com/krayyalasomayajula/inferno/tree/1c56f34ff19c69dec3d3cb6287b659345bce3492
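One behavioural difference worth flagging: call returns the two penalty scalars (buf3, buf4), but RegularizedLinearNew.forward keeps only output[0] and never fills self._losses, so the compiled module silently drops the regularizers that the eager version records. With the eager class, the side-channel losses are consumed like this (a sketch; the task loss is a placeholder, and the RegularizedLinear class from this record is assumed to be in scope):

import torch

layer = RegularizedLinear(4, 4)                   # in_features, out_features
out = layer(torch.rand(4, 4, 4, 4))
task_loss = out.pow(2).mean()                     # placeholder objective, not from the source
total = task_loss + sum(layer._losses.values())   # add activity + L1 weight penalties
total.backward()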
MSE
import torch import torch.nn as nn import torch.utils.checkpoint class MSE(nn.Module): def __init__(self): super(MSE, self).__init__() def forward(self, pred, real): diffs = torch.add(real, -pred) n = torch.numel(diffs.data) mse = torch.sum(diffs.pow(2)) / n return mse def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 0.00390625 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSENew(nn.Module): def __init__(self): super(MSENew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
lyh512796310/MMSA
MSE
false
3,948
[ "MIT" ]
0
e1735afd1b4e763995ab7aacb001884a7b7146ff
https://github.com/lyh512796310/MMSA/tree/e1735afd1b4e763995ab7aacb001884a7b7146ff
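The 0.00390625 baked into the reduction kernel is 1/256, i.e. 1/numel for the record's 4x4x4x4 inputs, so the fused kernel is exactly mean-reduced MSE. A one-line equivalence check (a sketch, assuming the eager MSE class from this record is in scope):

import torch
import torch.nn.functional as F

pred, real = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose(MSE()(pred, real), F.mse_loss(pred, real))  # default mean reduction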
WeightedMSELoss
import torch from torch import nn def assert_(condition, message='', exception_type=AssertionError): """Like assert, but with arbitrary exception types.""" if not condition: raise exception_type(message) class WeightedMSELoss(nn.Module): NEGATIVE_CLASS_WEIGHT = 1.0 def __init__(self, positive_class_weight=1.0, positive_class_value=1.0, size_average=True): super(WeightedMSELoss, self).__init__() assert_(positive_class_weight >= 0, "Positive class weight can't be less than zero, got {}.".format (positive_class_weight), ValueError) self.mse = nn.MSELoss(size_average=size_average) self.positive_class_weight = positive_class_weight self.positive_class_value = positive_class_value def forward(self, input, target): positive_class_mask = target.data.eq(self.positive_class_value ).type_as(target.data) weight_differential = positive_class_mask.mul_(self. positive_class_weight - self.NEGATIVE_CLASS_WEIGHT) weights = weight_differential.add_(self.NEGATIVE_CLASS_WEIGHT) sqrt_weights = weights.sqrt_() return self.mse(input * sqrt_weights, target * sqrt_weights) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_add_eq_mse_loss_mul_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 1.0 tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.float32) tmp5 = 0.0 tmp6 = tmp4 * tmp5 tmp7 = tmp6 + tmp2 tmp8 = libdevice.sqrt(tmp7) tmp9 = tmp0 * tmp8 tmp10 = tmp1 * tmp8 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused__to_copy_add_eq_mse_loss_mul_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def assert_(condition, message='', exception_type=AssertionError): """Like assert, but with arbitrary exception types.""" if not condition: raise exception_type(message) class WeightedMSELossNew(nn.Module): NEGATIVE_CLASS_WEIGHT = 1.0 def __init__(self, positive_class_weight=1.0, positive_class_value=1.0, size_average=True): super(WeightedMSELossNew, self).__init__() assert_(positive_class_weight >= 0, "Positive class weight can't be less than zero, got {}.".format (positive_class_weight), ValueError) self.mse = nn.MSELoss(size_average=size_average) self.positive_class_weight = positive_class_weight self.positive_class_value = positive_class_value def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
krayyalasomayajula/inferno
WeightedMSELoss
false
3,949
[ "Apache-2.0" ]
0
1c56f34ff19c69dec3d3cb6287b659345bce3492
https://github.com/krayyalasomayajula/inferno/tree/1c56f34ff19c69dec3d3cb6287b659345bce3492
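Because this trace was captured with the default positive_class_weight=1.0, the kernel's tmp5 = 0.0 (the weight differential) zeroes the positive-class mask and sqrt(1.0) leaves both inputs untouched, so this particular compilation collapses to plain mean MSE. A sketch of that special case, assuming the eager WeightedMSELoss class from this record is in scope:

import torch
import torch.nn.functional as F

crit = WeightedMSELoss()   # default weights: reduces to unweighted MSE
x, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose(crit(x, t), F.mse_loss(x, t))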
PatchMerging
import torch import torch.nn as nn import torch.nn.functional as F class PatchMerging(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ Forward function. Args: x: Input feature, tensor size (B, D, H, W, C). """ _B, _D, H, W, _C = x.shape pad_input = H % 2 == 1 or W % 2 == 1 if pad_input: x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) x0 = x[:, :, 0::2, 0::2, :] x1 = x[:, :, 1::2, 0::2, :] x2 = x[:, :, 0::2, 1::2, :] x3 = x[:, :, 1::2, 1::2, :] x = torch.cat([x0, x1, x2, x3], -1) x = self.norm(x) x = self.reduction(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2 x1 = xindex // 2 x3 = xindex tmp46 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp0 = r2 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 * x0 + 32 * x1 + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + 8 * x0 + 32 * x1 + (-4 + r2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + 8 * x0 + 32 * x1 + (-8 + r2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (20 + 8 * x0 + 32 * x1 + (-12 + r2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r2 + 16 * x3), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp43, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp49, xmask) tl.store(out_ptr1 + x3, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 1), torch. 
float32) buf2 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 64), torch .float32) buf4 = reinterpret_tensor(buf2, (4, 4, 2, 2, 1), (16, 4, 2, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0[grid(64)](buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return reinterpret_tensor(buf6, (4, 4, 2, 2, 8), (128, 32, 16, 8, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 16), (16, 1), 0 ), primals_4 class PatchMergingNew(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, input_0): primals_4 = self.reduction.weight primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
luohwu/video-swin-transformer-pytorch
PatchMerging
false
3,950
[ "MIT" ]
0
ad96877a6db44436183a03e5b9a80c425726c982
https://github.com/luohwu/video-swin-transformer-pytorch/tree/ad96877a6db44436183a03e5b9a80c425726c982
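The single fused kernel builds the 2x2 window concatenation and the LayerNorm statistics in one reduction; the bias-free nn.Linear(16, 8) then halves the concatenated channels, so (B, D, H, W, C) becomes (B, D, H/2, W/2, 2C). A shape sketch, assuming the eager PatchMerging class from this record is in scope:

import torch

pm = PatchMerging(dim=4)
x = torch.rand(4, 4, 4, 4, 4)   # (B, D, H, W, C)
print(pm(x).shape)              # torch.Size([4, 4, 2, 2, 8])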
SorensenDiceLoss
import torch from torch import nn def assert_(condition, message='', exception_type=AssertionError): """Like assert, but with arbitrary exception types.""" if not condition: raise exception_type(message) def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * D * H * W) (N, C) --> (C, N) The input must be at least 2D. """ assert_(input_.dim() >= 2, 'Tensor or variable must be at least 2D. Got one of dim {}.'.format( input_.dim()), ShapeError) num_channels = input_.size(1) permute_axes = list(range(input_.dim())) permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0] permuted = input_.permute(*permute_axes).contiguous() flattened = permuted.view(num_channels, -1) return flattened class ShapeError(ValueError): pass class SorensenDiceLoss(nn.Module): """ Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity between the input and the target. For both inputs and targets it must be the case that `input_or_target.size(1) = num_channels`. """ def __init__(self, weight=None, channelwise=True, eps=1e-06): """ Parameters ---------- weight : torch.FloatTensor or torch.cuda.FloatTensor Class weights. Applies only if `channelwise = True`. channelwise : bool Whether to apply the loss channelwise and sum the results (True) or to apply it on all channels jointly (False). """ super(SorensenDiceLoss, self).__init__() self.register_buffer('weight', weight) self.channelwise = channelwise self.eps = eps def forward(self, input, target): """ input: torch.FloatTensor or torch.cuda.FloatTensor target: torch.FloatTensor or torch.cuda.FloatTensor Expected shape of the inputs: (batch_size, nb_channels, ...) """ assert input.size() == target.size() if not self.channelwise: numerator = (input * target).sum() denominator = (input * input).sum() + (target * target).sum() loss = -2.0 * (numerator / denominator.clamp(min=self.eps)) else: input = flatten_samples(input) target = flatten_samples(target) numerator = (input * target).sum(-1) denominator = (input * input).sum(-1) + (target * target).sum(-1) channelwise_loss = -2 * (numerator / denominator.clamp(min=self .eps)) if self.weight is not None: if channelwise_loss.dim() == 2: channelwise_loss = channelwise_loss.squeeze(1) assert self.weight.size() == channelwise_loss.size() channelwise_loss = self.weight * channelwise_loss loss = channelwise_loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tmp0 * tmp0 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tmp1 * tmp1 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp16, xmask) @triton.jit def triton_per_fused_add_clamp_div_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.load(in_ptr2 + r0, None) tmp3 = tmp1 + tmp2 tmp4 = 1e-06 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 / tmp5 tmp7 = -2.0 tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_clamp_div_mul_sum_1[grid(1)](buf0, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf3, def assert_(condition, message='', exception_type=AssertionError): """Like assert, but with arbitrary exception types.""" if not condition: raise exception_type(message) def flatten_samples(input_): """ Flattens a tensor or a variable such that the channel axis is first and the sample axis is second. The shapes are transformed as follows: (N, C, H, W) --> (C, N * H * W) (N, C, D, H, W) --> (C, N * D * H * W) (N, C) --> (C, N) The input must be at least 2D. """ assert_(input_.dim() >= 2, 'Tensor or variable must be at least 2D. 
Got one of dim {}.'.format( input_.dim()), ShapeError) num_channels = input_.size(1) permute_axes = list(range(input_.dim())) permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0] permuted = input_.permute(*permute_axes).contiguous() flattened = permuted.view(num_channels, -1) return flattened class ShapeError(ValueError): pass class SorensenDiceLossNew(nn.Module): """ Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity between the input and the target. For both inputs and targets it must be the case that `input_or_target.size(1) = num_channels`. """ def __init__(self, weight=None, channelwise=True, eps=1e-06): """ Parameters ---------- weight : torch.FloatTensor or torch.cuda.FloatTensor Class weights. Applies only if `channelwise = True`. channelwise : bool Whether to apply the loss channelwise and sum the results (True) or to apply it on all channels jointly (False). """ super(SorensenDiceLossNew, self).__init__() self.register_buffer('weight', weight) self.channelwise = channelwise self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
krayyalasomayajula/inferno
SorensenDiceLoss
false
3,951
[ "Apache-2.0" ]
0
1c56f34ff19c69dec3d3cb6287b659345bce3492
https://github.com/krayyalasomayajula/inferno/tree/1c56f34ff19c69dec3d3cb6287b659345bce3492
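In the channelwise default, each channel contributes -2 * sum(x*y) / clamp(sum(x*x) + sum(y*y), min=eps); the first kernel accumulates the three per-channel sums in one pass and the second combines them. A sketch, assuming the eager SorensenDiceLoss class from this record is in scope:

import torch

crit = SorensenDiceLoss()
pred = torch.rand(4, 4, 4, 4)               # (batch, channels, H, W)
loss = crit(pred, pred)                     # perfect overlap gives -1 per channel
assert torch.allclose(loss, torch.tensor(-4.0), atol=1e-4)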
SIMSE
import torch import torch.nn as nn import torch.utils.checkpoint class SIMSE(nn.Module): def __init__(self): super(SIMSE, self).__init__() def forward(self, pred, real): diffs = torch.add(real, -pred) n = torch.numel(diffs.data) simse = torch.sum(diffs).pow(2) / n ** 2 return simse def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_neg_pow_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = -tmp1 tmp3 = tmp0 + tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp6 * tmp6 tmp8 = 1.52587890625e-05 tmp9 = tmp7 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_neg_pow_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SIMSENew(nn.Module): def __init__(self): super(SIMSENew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
lyh512796310/MMSA
SIMSE
false
3,952
[ "MIT" ]
0
e1735afd1b4e763995ab7aacb001884a7b7146ff
https://github.com/lyh512796310/MMSA/tree/e1735afd1b4e763995ab7aacb001884a7b7146ff
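Here 1.52587890625e-05 is 1/65536 = 1/n^2 for n = 256 elements, matching simse = (sum of differences)^2 / n^2; unlike MSE, errors of opposite sign cancel before the squaring. A sketch, assuming the eager SIMSE class from this record is in scope:

import torch

pred, real = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
d = real - pred
assert torch.allclose(SIMSE()(pred, real), d.sum().pow(2) / d.numel() ** 2)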
DiffLoss
import torch import torch.nn as nn import torch.utils.checkpoint class DiffLoss(nn.Module): def __init__(self): super(DiffLoss, self).__init__() def forward(self, input1, input2): batch_size = input1.size(0) input1 = input1.view(batch_size, -1) input2 = input2.view(batch_size, -1) input1_mean = torch.mean(input1, dim=0, keepdims=True) input2_mean = torch.mean(input2, dim=0, keepdims=True) input1 = input1 - input1_mean input2 = input2 - input2_mean input1_l2_norm = torch.norm(input1, p=2, dim=1, keepdim=True).detach() input1_l2 = input1.div(input1_l2_norm.expand_as(input1) + 1e-06) input2_l2_norm = torch.norm(input2, p=2, dim=1, keepdim=True).detach() input2_l2 = input2.div(input2_l2_norm.expand_as(input2) + 1e-06) diff_loss = torch.mean(input1_l2.t().mm(input2_l2).pow(2)) return diff_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + r1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + r1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + r1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-06 tmp18 = tmp16 + tmp17 tmp19 = tmp10 / tmp18 tl.store(out_ptr2 + (r1 + 64 * x0), tmp19, xmask) @triton.jit def triton_red_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r0 = rindex tmp0 = tl.load(in_ptr0 + r0, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg0_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32) triton_per_fused_add_div_linalg_vector_norm_mean_sub_0[grid(4)](arg1_1, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (1, 64), 0), buf5, out=buf6) del buf4 del buf5 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7 del buf7 triton_red_fused_mean_pow_1[grid(1)](buf8, buf6, 1, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf6 return buf8, class DiffLossNew(nn.Module): def __init__(self): super(DiffLossNew, self).__init__() def forward(self, input_0, 
input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
lyh512796310/MMSA
DiffLoss
false
3,953
[ "MIT" ]
0
e1735afd1b4e763995ab7aacb001884a7b7146ff
https://github.com/lyh512796310/MMSA/tree/e1735afd1b4e763995ab7aacb001884a7b7146ff
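After each input is flattened to (batch, features), batch-centered, and divided by its per-sample L2 norm, the loss is the mean squared entry of the 64x64 cross-Gram matrix input1_l2.t().mm(input2_l2) (the 4096.0 in the reduction kernel is 64 * 64), which pushes the two representations toward orthogonality. A sketch, assuming the eager DiffLoss class from this record is in scope:

import torch

crit = DiffLoss()
a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = crit(a, b)   # non-negative; zero only when the centered feature columns are mutually orthogonal
print(loss.item())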
TimeEncode
import torch import numpy as np import torch.nn as nn class TimeEncode(nn.Module): """Use a finite Fourier series with different phases and frequencies to encode the time difference between two events ..math:: \\Phi(t) = [\\cos(\\omega_0t+\\psi_0),\\cos(\\omega_1t+\\psi_1),...,\\cos(\\omega_nt+\\psi_n)] Parameter ---------- dimension : int Length of the Fourier series. The longer it is, the more timescale information it can capture. Example ---------- >>> tecd = TimeEncode(10) >>> t = torch.tensor([[1]]) >>> tecd(t) tensor([[[0.5403, 0.9950, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]], dtype=torch.float64, grad_fn=<CosBackward>) """ def __init__(self, dimension): super(TimeEncode, self).__init__() self.dimension = dimension self.w = torch.nn.Linear(1, dimension) self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np. linspace(0, 9, dimension)).float().reshape(dimension, -1)) self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float()) def forward(self, t): t = t.unsqueeze(dim=2).float() output = torch.cos(self.w(t)) return output def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dimension': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.cos(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cos_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, reinterpret_tensor(primals_1, (16, 1), (1, 1), 0), buf0 class TimeEncodeNew(nn.Module): """Use a finite Fourier series with different phases and frequencies to encode the time difference between two events ..math:: \\Phi(t) = [\\cos(\\omega_0t+\\psi_0),\\cos(\\omega_1t+\\psi_1),...,\\cos(\\omega_nt+\\psi_n)] Parameter ---------- dimension : int Length of the Fourier series. The longer it is, the more timescale information it can capture. Example ---------- >>> tecd = TimeEncode(10) >>> t = torch.tensor([[1]]) >>> tecd(t) tensor([[[0.5403, 0.9950, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]], dtype=torch.float64, grad_fn=<CosBackward>) """ def __init__(self, dimension): super(TimeEncodeNew, self).__init__() self.dimension = dimension self.w = torch.nn.Linear(1, dimension) self.w.weight = torch.nn.Parameter(torch.from_numpy(1 / 10 ** np. linspace(0, 9, dimension)).float().reshape(dimension, -1)) self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float()) def forward(self, input_0): primals_2 = self.w.weight primals_3 = self.w.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lxylxyoo/WSDM2022
TimeEncode
false
3,954
[ "MIT" ]
0
970aa5e9d0ccf597af33368ae1ad565543daa4de
https://github.com/lxylxyoo/WSDM2022/tree/970aa5e9d0ccf597af33368ae1ad565543daa4de
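The compiled version keeps addmm for w * t + psi and fuses only the elementwise cosine, so the encoder is Phi(t) = cos(w t + psi) feature-wise. A shape sketch, assuming the eager TimeEncode class from this record is in scope:

import torch

tecd = TimeEncode(4)   # dimension from get_init_inputs()
t = torch.rand(4, 4)   # (batch, num_time_deltas)
print(tecd(t).shape)   # torch.Size([4, 4, 4]): one cosine feature vector per delta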
ExpActivation
import torch import torch.nn as nn class ExpActivation(nn.Module): def __init__(self): super(ExpActivation, self).__init__() def forward(self, x): return torch.exp(-x ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_neg_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = -tmp1 tmp3 = tl_math.exp(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_neg_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ExpActivationNew(nn.Module): def __init__(self): super(ExpActivationNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mahkons/orthogonal
ExpActivation
false
3,955
[ "MIT" ]
0
19a69134ca9a01ef564eab624b8c1526291770aa
https://github.com/mahkons/orthogonal/tree/19a69134ca9a01ef564eab624b8c1526291770aa
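An eager reference for the exp(-x**2) Gaussian-bump activation that triton_poi_fused_exp_neg_pow_0 fuses; a parity check against ExpActivationNew itself would need a CUDA device, so this sketch only exercises the math on CPU:

import torch

x = torch.rand(4, 4, 4, 4)
y = torch.exp(-x ** 2)                     # values in (0, 1], peaked at x = 0
assert torch.allclose(y, 1.0 / torch.exp(x * x))  # same function, written as a reciprocal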
encoder
import torch import torch.nn as nn import torch.nn.functional as F class encoder(nn.Module): def __init__(self, ef_dim): super(encoder, self).__init__() self.ef_dim = ef_dim self.conv_1 = nn.Conv3d(1, self.ef_dim, 4, stride=2, padding=1, bias=True) self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim * 2, 4, stride=2, padding=1, bias=True) self.conv_3 = nn.Conv3d(self.ef_dim * 2, self.ef_dim * 4, 4, stride =2, padding=1, bias=True) self.conv_4 = nn.Conv3d(self.ef_dim * 4, self.ef_dim * 8, 4, stride =2, padding=1, bias=True) self.conv_5 = nn.Conv3d(self.ef_dim * 8, self.ef_dim * 8, 4, stride =1, padding=0, bias=True) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.constant_(self.conv_1.bias, 0) nn.init.xavier_uniform_(self.conv_2.weight) nn.init.constant_(self.conv_2.bias, 0) nn.init.xavier_uniform_(self.conv_3.weight) nn.init.constant_(self.conv_3.bias, 0) nn.init.xavier_uniform_(self.conv_4.weight) nn.init.constant_(self.conv_4.bias, 0) nn.init.xavier_uniform_(self.conv_5.weight) nn.init.constant_(self.conv_5.bias, 0) def forward(self, inputs, is_training=False): d_1 = self.conv_1(inputs) d_1 = F.leaky_relu(d_1, negative_slope=0.01, inplace=True) d_2 = self.conv_2(d_1) d_2 = F.leaky_relu(d_2, negative_slope=0.01, inplace=True) d_3 = self.conv_3(d_2) d_3 = F.leaky_relu(d_3, negative_slope=0.01, inplace=True) d_4 = self.conv_4(d_3) d_4 = F.leaky_relu(d_4, negative_slope=0.01, inplace=True) d_5 = self.conv_5(d_4) d_5 = d_5.view(-1, self.ef_dim * 8) d_5 = torch.sigmoid(d_5) return d_5 def get_inputs(): return [torch.rand([4, 1, 64, 64, 64])] def get_init_inputs(): return [[], {'ef_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 32768 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 8 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 512 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (8, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 4, 4, 4), (512, 64, 16, 4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (32, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) assert_size_stride(primals_11, (32,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 32, 32, 32), (131072, 32768, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 16, 16, 16), (32768, 4096, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 8, 8, 8), (8192, 512, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(32768)](buf5, primals_7, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(8192)](buf7, primals_9, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1, 1 ), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 1, 1, 1), (32, 1, 1, 1, 1)) buf9 = reinterpret_tensor(buf8, (4, 32), (32, 1), 0) del buf8 triton_poi_fused_sigmoid_4[grid(128)](buf9, primals_11, 128, XBLOCK =128, num_warps=4, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf9) class encoderNew(nn.Module): def __init__(self, ef_dim): super(encoderNew, self).__init__() self.ef_dim = ef_dim self.conv_1 = nn.Conv3d(1, self.ef_dim, 4, stride=2, padding=1, bias=True) self.conv_2 = 
nn.Conv3d(self.ef_dim, self.ef_dim * 2, 4, stride=2, padding=1, bias=True) self.conv_3 = nn.Conv3d(self.ef_dim * 2, self.ef_dim * 4, 4, stride =2, padding=1, bias=True) self.conv_4 = nn.Conv3d(self.ef_dim * 4, self.ef_dim * 8, 4, stride =2, padding=1, bias=True) self.conv_5 = nn.Conv3d(self.ef_dim * 8, self.ef_dim * 8, 4, stride =1, padding=0, bias=True) nn.init.xavier_uniform_(self.conv_1.weight) nn.init.constant_(self.conv_1.bias, 0) nn.init.xavier_uniform_(self.conv_2.weight) nn.init.constant_(self.conv_2.bias, 0) nn.init.xavier_uniform_(self.conv_3.weight) nn.init.constant_(self.conv_3.bias, 0) nn.init.xavier_uniform_(self.conv_4.weight) nn.init.constant_(self.conv_4.bias, 0) nn.init.xavier_uniform_(self.conv_5.weight) nn.init.constant_(self.conv_5.bias, 0) def forward(self, input_0): primals_1 = self.conv_1.weight primals_2 = self.conv_1.bias primals_4 = self.conv_2.weight primals_5 = self.conv_2.bias primals_6 = self.conv_3.weight primals_7 = self.conv_3.bias primals_8 = self.conv_4.weight primals_9 = self.conv_4.bias primals_10 = self.conv_5.weight primals_11 = self.conv_5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
luixiao1223/BSP-NET-pytorch
encoder
false
3,956
[ "MIT" ]
0
f871c8ce6a9d52ac922e110702c47cd1c89d0a73
https://github.com/luixiao1223/BSP-NET-pytorch/tree/f871c8ce6a9d52ac922e110702c47cd1c89d0a73
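A shape walk-through of the five Conv3d stages for ef_dim=4 on a 64^3 input; the printed shapes should match the assert_size_stride checks inside call() above (random weights, since only shapes matter here):

import torch
import torch.nn as nn

ef = 4
stages = [nn.Conv3d(1, ef, 4, stride=2, padding=1),
          nn.Conv3d(ef, ef * 2, 4, stride=2, padding=1),
          nn.Conv3d(ef * 2, ef * 4, 4, stride=2, padding=1),
          nn.Conv3d(ef * 4, ef * 8, 4, stride=2, padding=1),
          nn.Conv3d(ef * 8, ef * 8, 4, stride=1, padding=0)]
x = torch.rand(4, 1, 64, 64, 64)
for conv in stages:
    x = conv(x)
    print(tuple(x.shape))  # (4,4,32,32,32), (4,8,16,16,16), (4,16,8,8,8), (4,32,4,4,4), (4,32,1,1,1)
x = torch.sigmoid(x.view(-1, ef * 8))  # final (4, 32) feature code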
convnet
import torch import torch.nn as nn class convnet(nn.Module): def __init__(self, in_channel, dim): super(convnet, self).__init__() self.conv1 = nn.Conv2d(in_channel, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 1, kernel_size=1) def forward(self, x): x = self.conv1(x) x = torch.relu(x) x = self.conv2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(2048)](buf1, primals_2, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class convnetNew(nn.Module): def __init__(self, in_channel, dim): super(convnetNew, self).__init__() self.conv1 = nn.Conv2d(in_channel, 32, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(32, 1, kernel_size=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
lzz0007/pyGAT
convnet
false
3,957
[ "MIT" ]
0
ea09c56037185ec5924dcd20b9c09d151174d1a3
https://github.com/lzz0007/pyGAT/tree/ea09c56037185ec5924dcd20b9c09d151174d1a3
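The call() above runs the convolution through extern_kernels.convolution with bias=None and fuses the bias add (and ReLU) into a Triton kernel; a small eager check of that bias-splitting decomposition, which several records in this file rely on:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w = torch.rand(32, 4, 3, 3)
b = torch.rand(32)
fused = F.conv2d(x, w, b, padding=1)                          # bias inside the conv
split = F.conv2d(x, w, None, padding=1) + b.view(1, -1, 1, 1)  # bias added afterwards
assert torch.allclose(fused, split, atol=1e-6)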
MLPBody
import torch import torch.nn.functional as F import torch.nn as nn def layer_init(layer, w_scale=1.0): init_f = nn.init.orthogonal_ init_f(layer.weight.data) layer.weight.data.mul_(w_scale) if layer.bias is not None: nn.init.constant_(layer.bias.data, 0) return layer class MLPBody(nn.Module): def __init__(self, input_dim, feature_dim=512, hidden_dim=512): super().__init__() self.fc1 = layer_init(nn.Linear(input_dim, hidden_dim)) self.fc2 = layer_init(nn.Linear(hidden_dim, feature_dim)) self.feature_dim = feature_dim def forward(self, x): return self.fc2(F.relu(self.fc1(x.view(x.size(0), -1)))) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (512, 4), (4, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512), (512, 1)) assert_size_stride(primals_5, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 512), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (512, 512), (1, 512), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, primals_1, buf1, primals_4 def layer_init(layer, w_scale=1.0): init_f = nn.init.orthogonal_ init_f(layer.weight.data) layer.weight.data.mul_(w_scale) if layer.bias is not None: nn.init.constant_(layer.bias.data, 0) return layer class MLPBodyNew(nn.Module): def __init__(self, input_dim, feature_dim=512, hidden_dim=512): super().__init__() self.fc1 = layer_init(nn.Linear(input_dim, hidden_dim)) self.fc2 = layer_init(nn.Linear(hidden_dim, feature_dim)) self.feature_dim = feature_dim def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
lchenat/TSA
MLPBody
false
3,958
[ "Apache-2.0" ]
0
661266ba16e06f63962b306a7c30d25f37920c2d
https://github.com/lchenat/TSA/tree/661266ba16e06f63962b306a7c30d25f37920c2d
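layer_init above uses orthogonal initialization; for a tall weight such as the (512, 4) fc1 matrix this yields orthonormal columns, i.e. W^T W = I. A quick check of that property:

import torch
import torch.nn as nn

layer = nn.Linear(4, 512)            # weight shape (512, 4)
nn.init.orthogonal_(layer.weight)
wtw = layer.weight.T @ layer.weight  # should be the 4x4 identity
assert torch.allclose(wtw, torch.eye(4), atol=1e-5)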
OrthogonalHouseholder
import math import torch import torch.nn as nn class OrthogonalHouseholder(nn.Module): def __init__(self, sz, bias=True): super(OrthogonalHouseholder, self).__init__() self.sz = sz self.bias = bias self.A = nn.Parameter(torch.empty((sz, sz))) self.b = nn.Parameter(torch.empty(sz)) if bias else 0.0 self.reset_parameters() def reset_parameters(self): with torch.no_grad(): self.A.normal_(0, math.sqrt(2 / self.sz)) if self.bias: self.b.fill_(0.0) def forward(self, x): norms_sq = torch.einsum('ij,ij->i', self.A, self.A) for i in range(self.sz): x = x - 2 * self.A[i] * (x @ self.A[i].unsqueeze(1)) / norms_sq[i] return x + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sz': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp5 / tmp7 tmp9 = tmp0 - tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp5 / tmp7 tmp9 = tmp0 - tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_div_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + 2) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp5 / tmp7 tmp9 = tmp0 - tmp8 tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + 3) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp5 / tmp7 tmp9 = tmp0 - tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 
1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sub_0[grid(256)](primals_2, primals_1, buf1, buf0, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 1), 4), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_mul_sub_1[grid(256)](buf2, primals_1, buf3, buf0, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 1), 8), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_div_mul_sub_2[grid(256)](buf4, primals_1, buf5, buf0, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 1), 12), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sub_3[grid(256)](buf6, primals_1, buf7, buf0, primals_3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf8, primals_1, buf1, reinterpret_tensor(buf0, (), (), 0 ), reinterpret_tensor(primals_1, (4,), (1,), 4 ), buf3, reinterpret_tensor(buf0, (), (), 1), reinterpret_tensor( primals_1, (4,), (1,), 8), buf5, reinterpret_tensor(buf0, (), (), 2 ), reinterpret_tensor(primals_1, (4,), (1,), 12 ), buf7, reinterpret_tensor(buf0, (), (), 3), reinterpret_tensor(buf6, (4, 64), (1, 4), 0), reinterpret_tensor(buf4, (4, 64), (1, 4), 0 ), reinterpret_tensor(buf2, (4, 64), (1, 4), 0), reinterpret_tensor( primals_2, (4, 64), (1, 4), 0) class OrthogonalHouseholderNew(nn.Module): def __init__(self, sz, bias=True): super(OrthogonalHouseholderNew, self).__init__() self.sz = sz self.bias = bias self.A = nn.Parameter(torch.empty((sz, sz))) self.b = nn.Parameter(torch.empty(sz)) if bias else 0.0 self.reset_parameters() def reset_parameters(self): with torch.no_grad(): self.A.normal_(0, math.sqrt(2 / self.sz)) if self.bias: self.b.fill_(0.0) def forward(self, input_0): primals_1 = self.A primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mahkons/orthogonal
OrthogonalHouseholder
false
3,959
[ "MIT" ]
0
19a69134ca9a01ef564eab624b8c1526291770aa
https://github.com/mahkons/orthogonal/tree/19a69134ca9a01ef564eab624b8c1526291770aa
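Each update x <- x - 2*a_i*(x . a_i)/||a_i||^2 in forward() is a Householder reflection, so the whole layer is orthogonal and norm-preserving. A quick check of the loop on 2-D inputs (the batch size 8 is an arbitrary illustrative choice):

import math
import torch

sz = 4
A = torch.randn(sz, sz) * math.sqrt(2 / sz)
norms_sq = torch.einsum('ij,ij->i', A, A)   # row-wise squared norms, as in the module
x = torch.rand(8, sz)
y = x.clone()
for i in range(sz):
    y = y - 2 * A[i] * (y @ A[i].unsqueeze(1)) / norms_sq[i]
assert torch.allclose(y.norm(dim=1), x.norm(dim=1), atol=1e-5)  # norms preserved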
MyLinear
import torch import torch.nn as nn class MyLinear(nn.Module): def __init__(self, in_sz, out_sz, bias=True): super(MyLinear, self).__init__() self.in_sz = in_sz self.out_sz = out_sz self.bias = bias self.W = nn.Parameter(torch.empty((in_sz, out_sz))) self.b = nn.Parameter(torch.empty(1, out_sz)) if bias else 0.0 self.reset_parameters() def forward(self, x): return x @ self.W + self.b def regularization(self): if self.in_sz == self.out_sz: return 2 * ((self.W @ self.W.T - torch.eye(self.in_sz, device= self.W.device)) ** 2).sum() return ((self.W @ self.W.T - torch.eye(self.in_sz, device=self.W. device)) ** 2).sum() + ((self.W.T @ self.W - torch.eye(self. out_sz, device=self.W.device)) ** 2).sum() def reset_parameters(self): with torch.no_grad(): torch.nn.init.orthogonal_(self.W) if self.bias: self.b.fill_(0.0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_sz': 4, 'out_sz': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class MyLinearNew(nn.Module): def __init__(self, in_sz, out_sz, bias=True): super(MyLinearNew, self).__init__() self.in_sz = in_sz self.out_sz = out_sz self.bias = bias self.W = nn.Parameter(torch.empty((in_sz, out_sz))) self.b = nn.Parameter(torch.empty(1, out_sz)) if bias else 0.0 self.reset_parameters() def regularization(self): if self.in_sz == self.out_sz: return 2 * ((self.W @ self.W.T - torch.eye(self.in_sz, device= self.W.device)) ** 2).sum() return ((self.W @ self.W.T - torch.eye(self.in_sz, device=self.W. device)) ** 2).sum() + ((self.W.T @ self.W - torch.eye(self. out_sz, device=self.W.device)) ** 2).sum() def reset_parameters(self): with torch.no_grad(): torch.nn.init.orthogonal_(self.W) if self.bias: self.b.fill_(0.0) def forward(self, input_0): primals_1 = self.W primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mahkons/orthogonal
MyLinear
false
3,960
[ "MIT" ]
0
19a69134ca9a01ef564eab624b8c1526291770aa
https://github.com/mahkons/orthogonal/tree/19a69134ca9a01ef564eab624b8c1526291770aa
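regularization() penalizes deviation of W from orthogonality; immediately after reset_parameters it should be near zero. A sketch of the square case, where the doubled branch applies:

import torch

sz = 4
W = torch.empty(sz, sz)
torch.nn.init.orthogonal_(W)
reg = 2 * ((W @ W.T - torch.eye(sz)) ** 2).sum()
assert float(reg) < 1e-6  # ~0 right after orthogonal init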
OrthogonalHouseholderAlternative
import math import torch import torch.nn as nn class OrthogonalHouseholderAlternative(nn.Module): def __init__(self, sz, bias=True): super(OrthogonalHouseholderAlternative, self).__init__() self.sz = sz self.bias = bias self.A = nn.Parameter(torch.empty((sz, sz))) self.b = nn.Parameter(torch.empty(sz)) if bias else 0.0 self.reset_parameters() def reset_parameters(self): with torch.no_grad(): self.A.normal_(0, math.sqrt(2 / self.sz)) if self.bias: self.b.fill_(0.0) def _forward_precalc(self): B = self.A @ self.A.T self.diag = torch.diag(B) self.p = self.A.clone() for i in range(self.sz - 1): self.p[i + 1:] = self.p[i + 1:].clone() - (2 * B[i, i + 1:] / self.diag[i + 1:]).unsqueeze(1) * self.p[i].clone() def forward(self, x): self._forward_precalc() B = x @ self.A.T x = x - 2 * B / self.diag @ self.p return x + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sz': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_diagonal_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + x0), xmask) tmp3 = tl.load(in_ptr1 + (1 + x0), xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_div_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (6 + x0), xmask) tmp3 = tl.load(in_ptr1 + (2 + x0), xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex x0 = xindex % 4 tmp10 = tl.load(in_ptr0 + x2, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + x2, tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + (-1 + x1), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + x0, tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp4 * tmp5 tmp7 = tmp3 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp2, tmp7, tmp8) tmp11 = tl.where(tmp2, tmp9, tmp10) tl.store(out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_div_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 11) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp4 = tl.load(in_ptr1 + 3) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp2 = 2.0 tmp3 = tmp1 * tmp2 tmp6 = tmp3 / tmp5 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_mul_sub_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex x0 = xindex % 4 tmp10 = tl.load(in_ptr0 + x2, xmask) tmp0 = x1 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + x2, tmp2 & xmask, other=0.0) tmp4 = tl.load(in_ptr1 + (-2 + x1), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp5 = tl.load(in_ptr0 + (4 + x0), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp4 * tmp5 tmp7 = tmp3 - tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp2, tmp7, tmp8) tmp11 = 
tl.where(tmp2, tmp9, tmp10) tl.store(out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_mul_sub_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex x0 = xindex % 4 tmp4 = tl.load(in_ptr1 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp11 = tl.load(in_ptr0 + x2, xmask) tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + x2, tmp2 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (8 + x0), tmp2 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 * tmp6 tmp8 = tmp3 - tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tl.where(tmp2, tmp10, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_div_mul_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 / tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_sub_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_diagonal_copy_0[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((3,), (1,), torch.float32) triton_poi_fused_div_mul_1[grid(3)](buf0, buf1, buf2, 3, XBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((2,), (1,), torch.float32) triton_poi_fused_div_mul_2[grid(2)](buf0, buf1, buf3, 2, XBLOCK=2, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sub_3[grid(16)](primals_1, buf2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((1,), (1,), torch.float32) triton_poi_fused_div_mul_4[grid(1)](buf0, buf1, buf5, 1, XBLOCK=1, num_warps=1, num_stages=1) buf6 = buf0 del buf0 triton_poi_fused_mul_sub_5[grid(16)](buf4, buf3, buf6, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sub_6[grid(16)](buf6, buf5, buf7, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf8) buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 triton_poi_fused_div_mul_7[grid(256)](buf9, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) 
buf10 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 4), (4, 1), 0), buf7, out=buf10) buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused_add_sub_8[grid(256)](buf11, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf11, buf7, buf1, primals_1, buf1, buf2, buf3, reinterpret_tensor( buf4, (4,), (1,), 4), buf5, reinterpret_tensor(buf6, (4,), (1,), 8 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf7, (4, 4), (1, 4), 0) class OrthogonalHouseholderAlternativeNew(nn.Module): def __init__(self, sz, bias=True): super(OrthogonalHouseholderAlternativeNew, self).__init__() self.sz = sz self.bias = bias self.A = nn.Parameter(torch.empty((sz, sz))) self.b = nn.Parameter(torch.empty(sz)) if bias else 0.0 self.reset_parameters() def reset_parameters(self): with torch.no_grad(): self.A.normal_(0, math.sqrt(2 / self.sz)) if self.bias: self.b.fill_(0.0) def _forward_precalc(self): B = self.A @ self.A.T self.diag = torch.diag(B) self.p = self.A.clone() for i in range(self.sz - 1): self.p[i + 1:] = self.p[i + 1:].clone() - (2 * B[i, i + 1:] / self.diag[i + 1:]).unsqueeze(1) * self.p[i].clone() def forward(self, input_0): primals_1 = self.A primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mahkons/orthogonal
OrthogonalHouseholderAlternative
false
3,961
[ "MIT" ]
0
19a69134ca9a01ef564eab624b8c1526291770aa
https://github.com/mahkons/orthogonal/tree/19a69134ca9a01ef564eab624b8c1526291770aa
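_forward_precalc rewrites the sequential reflections so forward() needs only two matmuls. The general recursion is hard to verify by hand, so this sketch checks only the special case where the rows of A are mutually orthogonal: then B = A A^T is diagonal, the p-corrections vanish (p == A), and the precomputed form agrees with the plain reflection loop from OrthogonalHouseholder:

import torch

sz = 4
Q = torch.linalg.qr(torch.randn(sz, sz)).Q  # square matrix with orthonormal rows
x = torch.rand(8, sz)
d = (Q * Q).sum(dim=1)                      # row norms, all ~1 here

y = x.clone()                               # sequential reflections
for i in range(sz):
    y = y - 2 * Q[i] * (y @ Q[i].unsqueeze(1)) / d[i]

z = x - 2 * (x @ Q.T) / d @ Q               # precalc form of forward(), with p == Q
assert torch.allclose(y, z, atol=1e-5)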
Conv2d_depthwise_sep
import torch import torch.nn as nn class Conv2d_depthwise_sep(nn.Module): def __init__(self, nin, nout): super(Conv2d_depthwise_sep, self).__init__() self.depthwise = nn.Conv2d(nin, nin, kernel_size=3, padding=1, groups=nin) self.pointwise = nn.Conv2d(nin, nout, kernel_size=1) def forward(self, x): return self.pointwise(self.depthwise(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nin': 4, 'nout': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class Conv2d_depthwise_sepNew(nn.Module): def __init__(self, nin, nout): super(Conv2d_depthwise_sepNew, self).__init__() self.depthwise = nn.Conv2d(nin, nin, kernel_size=3, padding=1, groups=nin) self.pointwise = nn.Conv2d(nin, nout, kernel_size=1) def forward(self, input_0): primals_1 = self.depthwise.weight primals_2 = self.depthwise.bias primals_4 = self.pointwise.weight primals_5 = self.pointwise.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
maet3608/torchy
Conv2d_depthwise_sep
false
3,962
[ "Apache-2.0" ]
0
8c73732a1d4631bd97bfafdc18e52a22ff5410f7
https://github.com/maet3608/torchy/tree/8c73732a1d4631bd97bfafdc18e52a22ff5410f7
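The parameter-count comparison that motivates depthwise-separable convolution, evaluated for this record's nin=4, nout=4, 3x3 configuration:

import torch.nn as nn

nin, nout = 4, 4
dw = nn.Conv2d(nin, nin, kernel_size=3, padding=1, groups=nin)  # one 3x3 filter per channel
pw = nn.Conv2d(nin, nout, kernel_size=1)                        # 1x1 channel mixing
full = nn.Conv2d(nin, nout, kernel_size=3, padding=1)
sep_params = sum(p.numel() for m in (dw, pw) for p in m.parameters())
std_params = sum(p.numel() for p in full.parameters())
print(sep_params, std_params)  # 60 vs 148 (weights plus biases)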
MultiheadAttention
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter import torch.utils.checkpoint from torch.nn import Parameter class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward(self, query, key, value, attn_mask=None): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Timesteps can be masked by supplying a T x T mask in the `attn_mask` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if qkv_same: q, k, v = self.in_proj_qkv(query) elif kv_same: q = self.in_proj_q(query) if key is None: assert value is None k = v = None else: k, v = self.in_proj_kv(key) else: q = self.in_proj_q(query) k = self.in_proj_k(key) v = self.in_proj_v(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim ).transpose(0, 1) src_len = k.size(1) if self.add_zero_attn: src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat([attn_mask, attn_mask.new_zeros( attn_mask.size(0), 1)], dim=1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: try: attn_weights += attn_mask.unsqueeze(0) except: None None assert False attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as( attn_weights) attn_weights = F.dropout(attn_weights, p=self.attn_dropout, training=self.training) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.sum(dim=1) / self.num_heads return attn, attn_weights def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter import torch.utils.checkpoint from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4) buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32) triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0) del buf8 extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) 
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0) class MultiheadAttentionNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.attn_dropout = attn_dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) self.register_parameter('in_proj_bias', None) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query, **kwargs): return self._in_proj(query, end=self.embed_dim, **kwargs) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=0, end=None, **kwargs): weight = kwargs.get('weight', self.in_proj_weight) bias = kwargs.get('bias', self.in_proj_bias) weight = weight[start:end, :] if bias is not None: bias = bias[start:end] return F.linear(input, weight, bias) def forward(self, input_0, input_1, input_2): primals_4 = self.in_proj_weight primals_5 = self.in_proj_bias primals_6 = self.out_proj.weight primals_7 = self.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
lyh512796310/MMSA
MultiheadAttention
false
3,963
[ "MIT" ]
0
e1735afd1b4e763995ab7aacb001884a7b7146ff
https://github.com/lyh512796310/MMSA/tree/e1735afd1b4e763995ab7aacb001884a7b7146ff
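An eager sketch of the attention arithmetic that call() stitches together for the get_inputs shapes (embed_dim=4, num_heads=4 gives head_dim=1, and the 4x4x4x4 key/value tensors flatten to src_len=16); dropout, masking, and the input projections are omitted for brevity:

import torch
import torch.nn.functional as F

bsz, num_heads, tgt_len, src_len, head_dim = 4, 4, 4, 16, 1
scaling = head_dim ** -0.5
q = torch.rand(bsz * num_heads, tgt_len, head_dim) * scaling
k = torch.rand(bsz * num_heads, src_len, head_dim)
v = torch.rand(bsz * num_heads, src_len, head_dim)
attn_weights = F.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)  # buf7 above: (16, 4, 16)
attn = torch.bmm(attn_weights, v)                                  # (16, 4, 1), reshaped to (4, 4, 4)
avg = attn_weights.view(bsz, num_heads, tgt_len, src_len).sum(dim=1) / num_heads  # buf11: (4, 4, 16)
print(attn.shape, avg.shape)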
RegWeightedL1Loss
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data def _gather_feat(feat, ind, mask=None): dim = feat.size(2) ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def _tranpose_and_gather_feat(feat, ind): feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = _gather_feat(feat, ind) return feat class RegWeightedL1Loss(nn.Module): def __init__(self): super(RegWeightedL1Loss, self).__init__() def forward(self, output, mask, ind, target): pred = _tranpose_and_gather_feat(output, ind) mask = mask.float() loss = F.l1_loss(pred * mask, target * mask, size_average=False) loss = loss / (mask.sum() + 0.0001) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.ones( [4, 4], dtype=torch.int64), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_gather_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r5 = rindex // 4 % 16 r0 = rindex % 4 r2 = rindex // 16 % 4 r4 = rindex tmp0 = tl.load(in_ptr0 + r5, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + r4, None) tmp9 = tl.load(in_ptr3 + r4, None) tmp1 = tl.full([RBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 16), 'index out of bounds: 0 <= tmp4 < 16') tmp6 = tl.load(in_ptr1 + (16 * r0 + 64 * r2 + tmp4 % 16), None, eviction_policy='evict_last') tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp7 tmp11 = tmp8 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tl.broadcast_to(tmp7, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 0.0001 tmp20 = tmp18 + tmp19 tmp21 = tmp15 / tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_gather_mul_sub_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, def _gather_feat(feat, ind, mask=None): dim = feat.size(2) ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def _tranpose_and_gather_feat(feat, ind): feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = _gather_feat(feat, ind) return feat class RegWeightedL1LossNew(nn.Module): def __init__(self): super(RegWeightedL1LossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg2_1 = input_1 arg1_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
leobean/CenterNet_simple
RegWeightedL1Loss
false
3,964
[ "MIT" ]
0
13e2eab2c049563afde5defdf90434a310a32d02
https://github.com/leobean/CenterNet_simple/tree/13e2eab2c049563afde5defdf90434a310a32d02
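A quick way to sanity-check a record like this is to run the eager module and the compiled wrapper side by side. A minimal sketch, assuming a CUDA device and that RegWeightedL1Loss and RegWeightedL1LossNew from the listing above are in scope (the shapes are fixed by the assert_size_stride guards in call(); the tolerance is an arbitrary choice):

import torch

output = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, 4, device='cuda')
ind = torch.randint(0, 16, (4, 4), device='cuda')   # indices into the flattened 4x4 map
target = torch.rand(4, 4, 4, 4, device='cuda')
ref = RegWeightedL1Loss()(output, mask, ind, target)
out = RegWeightedL1LossNew()(output, mask, ind, target)
assert torch.allclose(ref, out, atol=1e-5), (ref.item(), out.item())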
ChannelAttention
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class ChannelAttention(nn.Module):

    def __init__(self, C):
        super(ChannelAttention, self).__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.fc1 = nn.Linear(C, int(C / 4))
        self.fc2 = nn.Linear(int(C / 4), C)

    def forward(self, x):
        avg_pool = F.avg_pool2d(x, kernel_size=x.size()[-1])
        avg_pool = avg_pool.permute(0, 2, 3, 1)
        fc = self.fc1(avg_pool)
        relu = self.relu(fc)
        fc = self.fc2(relu).permute(0, 3, 1, 2)
        atten = self.sigmoid(fc)
        output = atten * x
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'C': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 4, 4, 4), 0) del buf1 buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(4)](buf2, primals_3, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 1), (1, 0), 0), reinterpret_tensor(primals_4, (1, 4), (1, 1), 0), alpha =1, beta=1, out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_2[grid(256)](buf3, primals_1, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, primals_1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (4, 1), (1, 1), 0), buf3, primals_4, buf5 class ChannelAttentionNew(nn.Module): def __init__(self, C): super(ChannelAttentionNew, self).__init__() self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.fc1 = nn.Linear(C, int(C / 4)) self.fc2 = nn.Linear(int(C / 4), C) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
leobean/CenterNet_simple
ChannelAttention
false
3,965
[ "MIT" ]
0
13e2eab2c049563afde5defdf90434a310a32d02
https://github.com/leobean/CenterNet_simple/tree/13e2eab2c049563afde5defdf90434a310a32d02
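ChannelAttention is a squeeze-and-excitation style gate: a global average pool squeezes each channel to a scalar, a two-layer bottleneck (C -> C/4 -> C) produces per-channel weights, and a sigmoid maps them into (0, 1) before rescaling the input. A small eager sketch, assuming the class above is in scope (the input must be square, since the pool kernel is taken from the last dimension only):

import torch

att = ChannelAttention(C=4)      # bottleneck: fc1 maps 4 -> 1, fc2 maps 1 -> 4
x = torch.rand(2, 4, 8, 8)       # (N, C, H, W) with H == W
y = att(x)
assert y.shape == x.shape        # the gate broadcasts over H and W
assert (y <= x).all()            # sigmoid weights in (0, 1) can only shrink x >= 0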
RBF
import torch
import torch.nn as nn


class RBF(nn.Module):

    def __init__(self):
        super(RBF, self).__init__()
        self.mean = nn.Parameter(torch.Tensor([0.0]))
        self.std = nn.Parameter(torch.Tensor([1.0]))

    def forward(self, x):
        gauss = torch.exp(-(x - self.mean) ** 2 / (2 * self.std ** 2))
        return gauss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_exp_mul_neg_pow_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp6 = tl.load(in_ptr2 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp3 = tmp0 - tmp2
    tmp4 = tmp3 * tmp3
    tmp5 = -tmp4
    tmp8 = tmp7 * tmp7
    tmp9 = 2.0
    tmp10 = tmp8 * tmp9
    tmp11 = tmp5 / tmp10
    tmp12 = tl_math.exp(tmp11)
    tl.store(out_ptr0 + x0, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_exp_mul_neg_pow_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
    return buf0, primals_1, primals_2, primals_3, buf0


class RBFNew(nn.Module):

    def __init__(self):
        super(RBFNew, self).__init__()
        self.mean = nn.Parameter(torch.Tensor([0.0]))
        self.std = nn.Parameter(torch.Tensor([1.0]))

    def forward(self, input_0):
        primals_1 = self.mean
        primals_3 = self.std
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
maet3608/torchy
RBF
false
3,966
[ "Apache-2.0" ]
0
8c73732a1d4631bd97bfafdc18e52a22ff5410f7
https://github.com/maet3608/torchy/tree/8c73732a1d4631bd97bfafdc18e52a22ff5410f7
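With the default parameters (mean 0, std 1) the module is just exp(-x^2 / 2), so a direct numerical check is easy; a minimal sketch assuming RBF from the listing above is in scope:

import torch

rbf = RBF()                          # mean=0.0, std=1.0 at construction
x = torch.tensor([0.0, 1.0, -1.0])
y = rbf(x)
# 1.0 at the mean, exp(-0.5) ~ 0.6065 one standard deviation away
assert torch.allclose(y, torch.exp(-x ** 2 / 2))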
RegLoss
import torch
import torch.nn as nn
import torch.utils.data


def _gather_feat(feat, ind, mask=None):
    dim = feat.size(2)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    feat = feat.gather(1, ind)
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        feat = feat.view(-1, dim)
    return feat


def _tranpose_and_gather_feat(feat, ind):
    feat = feat.permute(0, 2, 3, 1).contiguous()
    feat = feat.view(feat.size(0), -1, feat.size(3))
    feat = _gather_feat(feat, ind)
    return feat


def _reg_loss(regr, gt_regr, mask):
    """L1 regression loss

    Arguments:
        regr (batch x max_objects x dim)
        gt_regr (batch x max_objects x dim)
        mask (batch x max_objects)
    """
    num = mask.float().sum()
    mask = mask.unsqueeze(2).expand_as(gt_regr).float()
    regr = regr * mask
    gt_regr = gt_regr * mask
    # `size_average=False` is deprecated; `reduction='sum'` is the
    # equivalent modern spelling.
    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
    regr_loss = regr_loss / (num + 0.0001)
    return regr_loss


class RegLoss(nn.Module):
    """Regression loss for an output tensor

    Arguments:
        output (batch x dim x h x w)
        mask (batch x max_objects)
        ind (batch x max_objects)
        target (batch x max_objects x dim)
    """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _tranpose_and_gather_feat(output, ind)
        loss = _reg_loss(pred, target, mask)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.ones([4,
        4], dtype=torch.int64), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_gather_mul_smooth_l1_loss_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r4 = rindex // 4 % 16 r0 = rindex % 4 r2 = rindex // 16 % 4 r5 = rindex // 16 r6 = rindex tmp0 = tl.load(in_ptr0 + r4, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (r0 + 4 * r5), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + r6, None) tmp1 = tl.full([RBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 16), 'index out of bounds: 0 <= tmp4 < 16') tmp6 = tl.load(in_ptr1 + (16 * r0 + 64 * r2 + tmp4 % 16), None, eviction_policy='evict_last') tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp7 tmp11 = tmp8 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = 1.0 tmp14 = tmp12 < tmp13 tmp15 = tmp12 * tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp13 tmp19 = tmp12 - tmp16 tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) @triton.jit def triton_per_fused_add_div_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_out_ptr0 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1]) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp6 = 0.0001 tmp7 = tmp3 + tmp6 tmp8 = tmp5 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_gather_mul_smooth_l1_loss_0[grid(1)](arg1_1, arg0_1, arg2_1, arg3_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg3_1 buf2 = buf0 del buf0 triton_per_fused_add_div_sum_1[grid(1)](buf2, arg2_1, 1, 64, XBLOCK =1, num_warps=2, num_stages=1) del arg2_1 return buf2, def _gather_feat(feat, ind, mask=None): dim = feat.size(2) ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def _tranpose_and_gather_feat(feat, ind): feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, 
feat.size(3)) feat = _gather_feat(feat, ind) return feat def _reg_loss(regr, gt_regr, mask): """ L1 regression loss Arguments: regr (batch x max_objects x dim) gt_regr (batch x max_objects x dim) mask (batch x max_objects) """ num = mask.float().sum() mask = mask.unsqueeze(2).expand_as(gt_regr).float() regr = regr * mask gt_regr = gt_regr * mask regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False) regr_loss = regr_loss / (num + 0.0001) return regr_loss class RegLossNew(nn.Module): """Regression loss for an output tensor Arguments: output (batch x dim x h x w) mask (batch x max_objects) ind (batch x max_objects) target (batch x max_objects x dim) """ def __init__(self): super(RegLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg2_1 = input_1 arg1_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
leobean/CenterNet_simple
RegLoss
false
3,967
[ "MIT" ]
0
13e2eab2c049563afde5defdf90434a310a32d02
https://github.com/leobean/CenterNet_simple/tree/13e2eab2c049563afde5defdf90434a310a32d02
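The ind tensor picks object-center locations out of a feature map that has been flattened to (batch, H*W, dim); _gather_feat is the piece that does the picking. A tiny illustration, assuming _gather_feat from the listing above is in scope:

import torch

feat = torch.arange(24, dtype=torch.float32).view(2, 4, 3)  # (B, H*W, dim)
ind = torch.tensor([[0, 2], [3, 1]])                        # (B, max_objects)
picked = _gather_feat(feat, ind)                            # (B, max_objects, dim)
assert torch.equal(picked[0, 1], feat[0, 2])
assert torch.equal(picked[1, 0], feat[1, 3])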
ScalarBiasScale
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init


class ScalarScaleBias(nn.Module):

    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
        ) ->None:
        super(ScalarScaleBias, self).__init__()
        if scale:
            self.weight = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('weight', None)
        if bias:
            self.bias = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('bias', None)
        self.weight_init = scale_init
        self.bias_init = bias_init
        self.reset_parameters()

    def reset_parameters(self) ->None:
        if self.weight is not None:
            init.constant_(self.weight, self.weight_init)
        if self.bias is not None:
            init.constant_(self.bias, self.bias_init)

    def forward(self, x):
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x


class ScalarBiasScale(ScalarScaleBias):

    def forward(self, x):
        if self.bias is not None:
            x = x + self.bias
        if self.weight is not None:
            x = x * self.weight
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp6 = tmp3 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf0, primals_1, primals_2, primals_3


class ScalarScaleBias(nn.Module):

    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
        ) ->None:
        super(ScalarScaleBias, self).__init__()
        if scale:
            self.weight = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('weight', None)
        if bias:
            self.bias = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('bias', None)
        self.weight_init = scale_init
        self.bias_init = bias_init
        self.reset_parameters()

    def reset_parameters(self) ->None:
        if self.weight is not None:
            init.constant_(self.weight, self.weight_init)
        if self.bias is not None:
            init.constant_(self.bias, self.bias_init)

    def forward(self, x):
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x


class ScalarBiasScaleNew(ScalarScaleBias):

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
maltanar/logicnets-1
ScalarBiasScale
false
3,968
[ "Apache-2.0" ]
0
0afa2aa5b39cb484db0fcaa542e55c8cbe586119
https://github.com/maltanar/logicnets-1/tree/0afa2aa5b39cb484db0fcaa542e55c8cbe586119
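The only difference from the parent class is the order of the two affine steps, which matters numerically. A short check, assuming both classes from the listing above are in scope:

import torch

sb = ScalarBiasScale(scale_init=2.0, bias_init=3.0)   # computes (x + b) * w
ss = ScalarScaleBias(scale_init=2.0, bias_init=3.0)   # computes x * w + b
x = torch.tensor([1.0])
assert sb(x).item() == (1.0 + 3.0) * 2.0   # 8.0
assert ss(x).item() == 1.0 * 2.0 + 3.0     # 5.0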
Conv2d_spatial_sep
import torch
import torch.nn as nn


class Conv2d_spatial_sep(nn.Module):

    def __init__(self, nin, nout):
        super(Conv2d_spatial_sep, self).__init__()
        self.conv1 = nn.Conv2d(nin, 1, kernel_size=(1, 3), groups=1, padding=0)
        self.conv2 = nn.Conv2d(1, nout, kernel_size=(3, 1), groups=1,
            padding=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nin': 4, 'nout': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 3), (12, 3, 3, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 1, 3, 1), (3, 3, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 2), (8, 8, 2, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1


class Conv2d_spatial_sepNew(nn.Module):

    def __init__(self, nin, nout):
        super(Conv2d_spatial_sepNew, self).__init__()
        self.conv1 = nn.Conv2d(nin, 1, kernel_size=(1, 3), groups=1, padding=0)
        self.conv2 = nn.Conv2d(1, nout, kernel_size=(3, 1), groups=1,
            padding=1)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
maet3608/torchy
Conv2d_spatial_sep
false
3,969
[ "Apache-2.0" ]
0
8c73732a1d4631bd97bfafdc18e52a22ff5410f7
https://github.com/maet3608/torchy/tree/8c73732a1d4631bd97bfafdc18e52a22ff5410f7
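Factoring a 3x3 convolution into a 1x3 followed by a 3x1 through a single intermediate channel cuts the parameter count sharply, and the padding works out so that H x W is preserved (the 1x3 stage shrinks the width by 2, and the 3x1 stage's padding=1 restores the width while keeping the height). A rough comparison, assuming the class above is in scope; the channel sizes here are arbitrary:

import torch.nn as nn

sep = Conv2d_spatial_sep(nin=16, nout=32)
full = nn.Conv2d(16, 32, kernel_size=3, padding=1)
n_sep = sum(p.numel() for p in sep.parameters())     # 49 + 128 = 177
n_full = sum(p.numel() for p in full.parameters())   # 4608 + 32 = 4640
print(n_sep, n_full)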
Conv_Block
import torch
import torch.nn as nn
import torch.utils.data
from torch.nn import functional as F


class Conv_Block(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, padding,
        stride, pool_kernel_size=(2, 2)):
        super(Conv_Block, self).__init__()
        # NOTE: nn.Conv2d's positional signature is (in_channels,
        # out_channels, kernel_size, stride, padding), so `padding` below is
        # actually consumed as the stride and `stride` as the padding. The
        # compiled call() reflects this: stride=(4, 4), padding=(1, 1).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size,
            padding, stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size,
            padding, stride)
        self.pool = nn.MaxPool2d(pool_kernel_size)

    def forward(self, x):
        x = F.elu(self.conv1(x))
        x = F.elu(self.conv2(x))
        x = self.pool(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
        'padding': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16, 16), (1024, 256, 16, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 
4, 16, 16), (1024, 256, 16, 1), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(4096)](buf1, primals_2, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(4, 4), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_elu_1[grid(256)](buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(64)](buf5, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf6, primals_1, primals_3, primals_4, buf1, buf2, buf4, buf5, buf7 class Conv_BlockNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding, stride, pool_kernel_size=(2, 2)): super(Conv_BlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, padding, stride) self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, padding, stride) self.pool = nn.MaxPool2d(pool_kernel_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
majedelhelou/PriorLearning
Conv_Block
false
3,970
[ "MIT" ]
0
f66d25993c3b99dd31d9d62abeb3e0a5623e034d
https://github.com/majedelhelou/PriorLearning/tree/f66d25993c3b99dd31d9d62abeb3e0a5623e034d
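The compiled call() above uses stride=(4, 4) and padding=(1, 1) even though the module was constructed with padding=4 and stride=1; that follows directly from nn.Conv2d's positional order (in_channels, out_channels, kernel_size, stride, padding), as flagged in the comment added to the source. A short demonstration, assuming Conv_Block from the listing above is in scope:

import torch.nn as nn

conv = nn.Conv2d(4, 4, 4, 4, 1)     # positionally: kernel=4, stride=4, padding=1
print(conv.stride, conv.padding)    # (4, 4) (1, 1)

blk = Conv_Block(in_channels=4, out_channels=4, kernel_size=4, padding=4, stride=1)
print(blk.conv1.stride, blk.conv1.padding)   # (4, 4) (1, 1): 'padding' became the stride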
ScalarScaleBias
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init


class ScalarScaleBias(nn.Module):

    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
        ) ->None:
        super(ScalarScaleBias, self).__init__()
        if scale:
            self.weight = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('weight', None)
        if bias:
            self.bias = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('bias', None)
        self.weight_init = scale_init
        self.bias_init = bias_init
        self.reset_parameters()

    def reset_parameters(self) ->None:
        if self.weight is not None:
            init.constant_(self.weight, self.weight_init)
        if self.bias is not None:
            init.constant_(self.bias, self.bias_init)

    def forward(self, x):
        if self.weight is not None:
            x = x * self.weight
        if self.bias is not None:
            x = x + self.bias
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp3 = tmp0 * tmp2
    tmp6 = tmp3 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1,
            primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class ScalarScaleBiasNew(nn.Module):

    def __init__(self, scale=True, scale_init=1.0, bias=True, bias_init=0.0
        ) ->None:
        super(ScalarScaleBiasNew, self).__init__()
        if scale:
            self.weight = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('weight', None)
        if bias:
            self.bias = Parameter(torch.Tensor(1))
        else:
            self.register_parameter('bias', None)
        self.weight_init = scale_init
        self.bias_init = bias_init
        self.reset_parameters()

    def reset_parameters(self) ->None:
        if self.weight is not None:
            init.constant_(self.weight, self.weight_init)
        if self.bias is not None:
            init.constant_(self.bias, self.bias_init)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
maltanar/logicnets-1
ScalarScaleBias
false
3,971
[ "Apache-2.0" ]
0
0afa2aa5b39cb484db0fcaa542e55c8cbe586119
https://github.com/maltanar/logicnets-1/tree/0afa2aa5b39cb484db0fcaa542e55c8cbe586119
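When a term is disabled, register_parameter(name, None) keeps the attribute present but excludes it from the parameter list, and forward() skips that step entirely. A small check, assuming ScalarScaleBias from the listing above is in scope:

import torch

m = ScalarScaleBias(scale=False, bias=True, bias_init=0.5)
assert m.weight is None
assert len(list(m.parameters())) == 1          # only the bias is learnable
assert m(torch.tensor([2.0])).item() == 2.5    # the multiply is skipped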
DiceLoss
import torch
import torch.nn as nn


class DiceLoss(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(DiceLoss, self).__init__()

    def forward(self, inputs, targets, smooth=1):
        inputs = inputs.contiguous()
        targets = targets.contiguous()
        intersection = (inputs * targets).sum(dim=2).sum(dim=2)
        dice = (2.0 * intersection + smooth) / (inputs.sum(dim=2).sum(dim=2
            ) + targets.sum(dim=2).sum(dim=2) + smooth)
        loss = 1 - dice
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 16 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + 16 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + 16 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (1 + 16 * r0), None, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr1 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr1 + (2 + 16 * r0), None, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr1 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp38 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr1 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr1 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp47 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr1 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr1 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp54 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp55 = tl.load(in_ptr1 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp59 = tl.load(in_ptr1 + (15 + 16 * r0), None, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp28 = tmp26 * tmp27 tmp29 = tmp25 + tmp28 tmp30 = tmp14 + tmp29 tmp33 = tmp31 * tmp32 tmp36 = tmp34 * tmp35 tmp37 = tmp33 
+ tmp36 tmp40 = tmp38 * tmp39 tmp41 = tmp37 + tmp40 tmp44 = tmp42 * tmp43 tmp45 = tmp41 + tmp44 tmp46 = tmp30 + tmp45 tmp49 = tmp47 * tmp48 tmp52 = tmp50 * tmp51 tmp53 = tmp49 + tmp52 tmp56 = tmp54 * tmp55 tmp57 = tmp53 + tmp56 tmp60 = tmp58 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp46 + tmp61 tmp63 = tmp0 + tmp3 tmp64 = tmp63 + tmp7 tmp65 = tmp64 + tmp11 tmp66 = tmp15 + tmp18 tmp67 = tmp66 + tmp22 tmp68 = tmp67 + tmp26 tmp69 = tmp65 + tmp68 tmp70 = tmp31 + tmp34 tmp71 = tmp70 + tmp38 tmp72 = tmp71 + tmp42 tmp73 = tmp69 + tmp72 tmp74 = tmp47 + tmp50 tmp75 = tmp74 + tmp54 tmp76 = tmp75 + tmp58 tmp77 = tmp73 + tmp76 tmp78 = tmp1 + tmp4 tmp79 = tmp78 + tmp8 tmp80 = tmp79 + tmp12 tmp81 = tmp16 + tmp19 tmp82 = tmp81 + tmp23 tmp83 = tmp82 + tmp27 tmp84 = tmp80 + tmp83 tmp85 = tmp32 + tmp35 tmp86 = tmp85 + tmp39 tmp87 = tmp86 + tmp43 tmp88 = tmp84 + tmp87 tmp89 = tmp48 + tmp51 tmp90 = tmp89 + tmp55 tmp91 = tmp90 + tmp59 tmp92 = tmp88 + tmp91 tmp93 = 2.0 tmp94 = tmp62 * tmp93 tmp95 = 1.0 tmp96 = tmp94 + tmp95 tmp97 = tmp77 + tmp92 tmp98 = tmp97 + tmp95 tmp99 = tmp96 / tmp98 tmp100 = tmp95 - tmp99 tmp101 = tl.broadcast_to(tmp100, [XBLOCK, RBLOCK]) tmp103 = tl.sum(tmp101, 1)[:, None] tmp104 = 16.0 tmp105 = tmp103 / tmp104 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp105, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 get_raw_stream(0) triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf4, arg0_1, arg1_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class DiceLossNew(nn.Module): def __init__(self, weight=None, size_average=True): super(DiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
manuelhz/dissertation
DiceLoss
false
3,972
[ "MIT" ]
0
ca89475f79505dfb6d8a3645ca85451df7fce3b6
https://github.com/manuelhz/dissertation/tree/ca89475f79505dfb6d8a3645ca85451df7fce3b6
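The per-(batch, channel) dice is (2*intersection + smooth) / (sum_inputs + sum_targets + smooth), so for binary masks perfect agreement gives a loss of exactly 0 and disjoint masks give 1 - smooth/(s1 + s2 + smooth). A worked check, assuming DiceLoss from the listing above is in scope:

import torch

loss = DiceLoss()
t = (torch.rand(2, 3, 8, 8) > 0.5).float()
assert loss(t, t).item() == 0.0                # dice = (2s+1)/(2s+1) = 1 per channel
a = torch.zeros(1, 1, 2, 2); a[..., 0, 0] = 1
b = torch.zeros(1, 1, 2, 2); b[..., 1, 1] = 1
assert torch.allclose(loss(a, b), torch.tensor(2 / 3))   # dice = 1/(1+1+1)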
OpenPoseLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


class OpenPoseLoss(nn.Module):

    def __init__(self):
        super(OpenPoseLoss, self).__init__()

    def forward(self, saved_for_loss, heatmap_target, heat_mask,
        paf_target, paf_mask):
        """Compute the loss.

        Parameters
        ----------
        saved_for_loss : list
            Output of OpenPoseNet.
        heatmap_target : [num_batch, 19, 46, 46]
            Heatmap annotation.
        heat_mask : [num_batch, 19, 46, 46]
            Heatmap mask.
        paf_target : [num_batch, 38, 46, 46]
            PAF annotation.
        paf_mask : [num_batch, 38, 46, 46]
            PAF mask.

        Returns
        -------
        loss : torch.Tensor
            Total loss summed over the six stages.
        """
        total_loss = 0
        for j in range(6):
            pred1 = saved_for_loss[2 * j] * paf_mask
            gt1 = paf_target.float() * paf_mask
            pred2 = saved_for_loss[2 * j + 1] * heat_mask
            gt2 = heatmap_target.float() * heat_mask
            total_loss += F.mse_loss(pred1, gt1, reduction='mean'
                ) + F.mse_loss(pred2, gt2, reduction='mean')
        return total_loss


def get_inputs():
    return [torch.rand([12, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand
        ([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp3 = tl.load(in_ptr2 + r2, None) tmp10 = tl.load(in_ptr0 + (128 + r0), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (256 + r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (384 + r0), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (512 + r0), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (640 + r0), None, eviction_policy='evict_last') tmp45 = tl.load(in_ptr0 + (64 + r0), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + r2, None) tmp48 = tl.load(in_ptr4 + r2, None) tmp55 = tl.load(in_ptr0 + (192 + r0), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr0 + (320 + r0), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr0 + (448 + r0), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr0 + (576 + r0), None, eviction_policy='evict_last') tmp83 = tl.load(in_ptr0 + (704 + r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp11 = tmp10 * tmp1 tmp12 = tmp11 - tmp4 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp18 = tmp17 * tmp1 tmp19 = tmp18 - tmp4 tmp20 = tmp19 * tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp25 = tmp24 * tmp1 tmp26 = tmp25 - tmp4 tmp27 = tmp26 * tmp26 tmp28 = tl.broadcast_to(tmp27, [RBLOCK]) tmp30 = triton_helpers.promote_to_tensor(tl.sum(tmp28, 0)) tmp32 = tmp31 * tmp1 tmp33 = tmp32 - tmp4 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [RBLOCK]) tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0)) tmp39 = tmp38 * tmp1 tmp40 = tmp39 - tmp4 tmp41 = tmp40 * tmp40 tmp42 = tl.broadcast_to(tmp41, [RBLOCK]) tmp44 = triton_helpers.promote_to_tensor(tl.sum(tmp42, 0)) tmp47 = tmp45 * tmp46 tmp49 = tmp48 * tmp46 tmp50 = tmp47 - tmp49 tmp51 = tmp50 * tmp50 tmp52 = tl.broadcast_to(tmp51, [RBLOCK]) tmp54 = triton_helpers.promote_to_tensor(tl.sum(tmp52, 0)) tmp56 = tmp55 * tmp46 tmp57 = tmp56 - tmp49 tmp58 = tmp57 * tmp57 tmp59 = tl.broadcast_to(tmp58, [RBLOCK]) tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0)) tmp63 = tmp62 * tmp46 tmp64 = tmp63 - tmp49 tmp65 = tmp64 * tmp64 tmp66 = tl.broadcast_to(tmp65, [RBLOCK]) tmp68 = triton_helpers.promote_to_tensor(tl.sum(tmp66, 0)) tmp70 = tmp69 * tmp46 tmp71 = tmp70 - tmp49 tmp72 = tmp71 * tmp71 tmp73 = tl.broadcast_to(tmp72, [RBLOCK]) tmp75 = triton_helpers.promote_to_tensor(tl.sum(tmp73, 0)) tmp77 = tmp76 * tmp46 tmp78 = tmp77 - tmp49 tmp79 = tmp78 * tmp78 tmp80 = tl.broadcast_to(tmp79, 
[RBLOCK]) tmp82 = triton_helpers.promote_to_tensor(tl.sum(tmp80, 0)) tmp84 = tmp83 * tmp46 tmp85 = tmp84 - tmp49 tmp86 = tmp85 * tmp85 tmp87 = tl.broadcast_to(tmp86, [RBLOCK]) tmp89 = triton_helpers.promote_to_tensor(tl.sum(tmp87, 0)) tmp90 = 256.0 tmp91 = tmp9 / tmp90 tmp92 = tmp54 / tmp90 tmp93 = tmp91 + tmp92 tmp94 = 0.0 tmp95 = tmp93 + tmp94 tmp96 = tmp16 / tmp90 tmp97 = tmp61 / tmp90 tmp98 = tmp96 + tmp97 tmp99 = tmp95 + tmp98 tmp100 = tmp23 / tmp90 tmp101 = tmp68 / tmp90 tmp102 = tmp100 + tmp101 tmp103 = tmp99 + tmp102 tmp104 = tmp30 / tmp90 tmp105 = tmp75 / tmp90 tmp106 = tmp104 + tmp105 tmp107 = tmp103 + tmp106 tmp108 = tmp37 / tmp90 tmp109 = tmp82 / tmp90 tmp110 = tmp108 + tmp109 tmp111 = tmp107 + tmp110 tmp112 = tmp44 / tmp90 tmp113 = tmp89 / tmp90 tmp114 = tmp112 + tmp113 tmp115 = tmp111 + tmp114 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp115, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (12, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf10 = buf0 del buf0 buf13 = buf10 del buf10 get_raw_stream(0) triton_per_fused_add_mse_loss_mul_0[grid(1)](buf13, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return buf13, class OpenPoseLossNew(nn.Module): def __init__(self): super(OpenPoseLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3, input_4): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
makotovnjp/Talent5OpenPose
OpenPoseLoss
false
3,973
[ "Apache-2.0" ]
0
1ebbbd4f226b6839d7d1627d6c33edd416c137fc
https://github.com/makotovnjp/Talent5OpenPose/tree/1ebbbd4f226b6839d7d1627d6c33edd416c137fc
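Each of the six stages contributes one PAF prediction (even indices of saved_for_loss) and one heatmap prediction (odd indices), and the twelve MSE terms are summed; get_inputs() above exploits this by passing a single (12, 4, 4, 4) tensor, whose first axis indexes exactly like a 12-element list. A shape-level sketch with the real 19/38-channel layout, assuming OpenPoseLoss from the listing above is in scope:

import torch

crit = OpenPoseLoss()
N, H, W = 2, 46, 46
saved_for_loss = []
for _ in range(6):
    saved_for_loss.append(torch.rand(N, 38, H, W))   # PAF at indices 0, 2, 4, ...
    saved_for_loss.append(torch.rand(N, 19, H, W))   # heatmap at indices 1, 3, 5, ...
loss = crit(saved_for_loss, torch.rand(N, 19, H, W), torch.ones(N, 19, H, W),
            torch.rand(N, 38, H, W), torch.ones(N, 38, H, W))
print(loss)   # scalar: sum of 12 mean-squared-error terms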
MLP
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F


class MLP(nn.Module):

    def __init__(self, input_shape, n_layers, n_units):
        super().__init__()
        self._layers = []
        n_in = int(np.prod(np.array(input_shape)))
        for i in range(n_layers):
            layer = nn.Linear(n_in, n_units)
            self.add_module('hidden_layer_{}'.format(i + 1), layer)
            n_in = n_units
            self._layers.append(layer)

    def forward(self, x):
        h = x.reshape(x.shape[0], -1)
        for layer in self._layers:
            h = F.relu(layer(h))
        return h


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_shape': 4, 'n_layers': 1, 'n_units': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1,
            primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, buf2


class MLPNew(nn.Module):

    def __init__(self, input_shape, n_layers, n_units):
        super().__init__()
        self._layers = []
        n_in = int(np.prod(np.array(input_shape)))
        for i in range(n_layers):
            layer = nn.Linear(n_in, n_units)
            self.add_module('hidden_layer_{}'.format(i + 1), layer)
            n_in = n_units
            self._layers.append(layer)

    def forward(self, input_0):
        primals_1 = self.hidden_layer_1.weight
        primals_3 = self.hidden_layer_1.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
manfreddiaz/rl-laplacian
MLP
false
3,974
[ "MIT" ]
0
034803adb5c20c3bb7822b18d675b762fdcc53dc
https://github.com/manfreddiaz/rl-laplacian/tree/034803adb5c20c3bb7822b18d675b762fdcc53dc
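The layers live in a plain Python list, but add_module() also attaches each one to the module, so the parameters are still visible to optimizers and state_dict(); the input is flattened per sample before the first layer. A quick check, assuming MLP from the listing above is in scope (the shapes are illustrative):

import torch

mlp = MLP(input_shape=(3, 8), n_layers=2, n_units=16)
x = torch.rand(5, 3, 8)
h = mlp(x)                                   # flattened to (5, 24) first
assert h.shape == (5, 16)
assert len(list(mlp.parameters())) == 4      # weight + bias for each layer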
PSNRLoss
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss


def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float'
    ) ->torch.Tensor:
    """Function that computes PSNR

    See :class:`~kornia.losses.PSNRLoss` for details.
    """
    if not torch.is_tensor(input) or not torch.is_tensor(target):
        raise TypeError(
            f'Expected 2 torch tensors but got {type(input)} and {type(target)}'
            )
    if input.shape != target.shape:
        raise TypeError(
            f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}'
            )
    mse_val = mse_loss(input, target, reduction='mean')
    max_val_tensor: 'torch.Tensor' = torch.tensor(max_val).to(input.device
        ).to(input.dtype)
    return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)


class PSNRLoss(nn.Module):
    """Creates a criterion that calculates the PSNR between 2 images.

    Given an m x n image, the PSNR is:

    .. math::

        \\text{PSNR} = 10 \\log_{10} \\bigg(\\frac{\\text{MAX}_I^2}{MSE(I,T)}\\bigg)

    where

    .. math::

        \\text{MSE}(I,T) = \\frac{1}{mn}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2

    and :math:`\\text{MAX}_I` is the maximum possible input value
    (e.g for floating point images :math:`\\text{MAX}_I=1`).

    Arguments:
        max_val (float): Maximum value of input

    Shape:
        - input: :math:`(*)`
        - approximation: :math:`(*)` same shape as input
        - output: :math:`()` a scalar

    Examples:
        >>> kornia.losses.psnr_loss(torch.ones(1), 1.2*torch.ones(1), 2)
        tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)

    Reference:
        https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
    """

    def __init__(self, max_val: 'float') ->None:
        super(PSNRLoss, self).__init__()
        self.max_val: 'float' = max_val

    def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
        ) ->torch.Tensor:
        return psnr_loss(input, target, self.max_val)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'max_val': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.functional import mse_loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_log10_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 16.0 tmp10 = tmp9 / tmp8 tmp11 = libdevice.log10(tmp10) tmp12 = 10.0 tmp13 = tmp11 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_log10_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def psnr_loss(input: 'torch.Tensor', target: 'torch.Tensor', max_val: 'float' ) ->torch.Tensor: """Function that computes PSNR See :class:`~kornia.losses.PSNRLoss` for details. """ if not torch.is_tensor(input) or not torch.is_tensor(target): raise TypeError( f'Expected 2 torch tensors but got {type(input)} and {type(target)}' ) if input.shape != target.shape: raise TypeError( f'Expected tensors of equal shapes, but got {input.shape} and {target.shape}' ) mse_val = mse_loss(input, target, reduction='mean') max_val_tensor: 'torch.Tensor' = torch.tensor(max_val).to(input.device).to( input.dtype) return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val) class PSNRLossNew(nn.Module): """Creates a criterion that calculates the PSNR between 2 images. Given an m x n image, the PSNR is: .. math:: \\text{PSNR} = 10 \\log_{10} \\bigg(\\frac{\\text{MAX}_I^2}{MSE(I,T)}\\bigg) where .. math:: \\text{MSE}(I,T) = \\frac{1}{mn}\\sum_{i=0}^{m-1}\\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2 and :math:`\\text{MAX}_I` is the maximum possible input value (e.g for floating point images :math:`\\text{MAX}_I=1`). Arguments: max_val (float): Maximum value of input Shape: - input: :math:`(*)` - approximation: :math:`(*)` same shape as input - output: :math:`()` a scalar Examples: >>> kornia.losses.psnr_loss(torch.ones(1), 1.2*torch.ones(1), 2) tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10) Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition """ def __init__(self, max_val: 'float') ->None: super(PSNRLossNew, self).__init__() self.max_val: 'float' = max_val def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
manyids2/kornia-1
PSNRLoss
false
3,975
[ "ECL-2.0", "Apache-2.0" ]
0
47f5e91f502a0819be9b5a843019b37b15aa37f2
https://github.com/manyids2/kornia-1/tree/47f5e91f502a0819be9b5a843019b37b15aa37f2
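The docstring example can be reproduced directly, and the arithmetic is easy to confirm by hand: MSE = (1.2 - 1)^2 = 0.04 and 10 * log10(2^2 / 0.04) = 10 * log10(100) = 20 dB. A minimal check, assuming PSNRLoss from the listing above is in scope:

import math
import torch

crit = PSNRLoss(max_val=2.0)
val = crit(torch.ones(1), 1.2 * torch.ones(1))
assert torch.allclose(val, torch.tensor(20.0), atol=1e-3)
assert abs(10 * math.log10(4 / 0.04) - 20.0) < 1e-9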
img_encoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class resnet_block(nn.Module):

    def __init__(self, dim_in, dim_out):
        super(resnet_block, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        if self.dim_in == self.dim_out:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
        else:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2,
                padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2,
                padding=0, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
            nn.init.xavier_uniform_(self.conv_s.weight)

    def forward(self, input, is_training=False):
        if self.dim_in == self.dim_out:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            output = output + input
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        else:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            input_ = self.conv_s(input)
            output = output + input_
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        return output


class img_encoder(nn.Module):

    def __init__(self, img_ef_dim, z_dim):
        super(img_encoder, self).__init__()
        self.img_ef_dim = img_ef_dim
        self.z_dim = z_dim
        self.conv_0 = nn.Conv2d(1, self.img_ef_dim, 7, stride=2, padding=3,
            bias=False)
        self.res_1 = resnet_block(self.img_ef_dim, self.img_ef_dim)
        self.res_2 = resnet_block(self.img_ef_dim, self.img_ef_dim)
        self.res_3 = resnet_block(self.img_ef_dim, self.img_ef_dim * 2)
        self.res_4 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 2)
        self.res_5 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 4)
        self.res_6 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 4)
        self.res_7 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 8)
        self.res_8 = resnet_block(self.img_ef_dim * 8, self.img_ef_dim * 8)
        self.conv_9 = nn.Conv2d(self.img_ef_dim * 8, self.img_ef_dim * 16,
            4, stride=2, padding=1, bias=True)
        self.conv_10 = nn.Conv2d(self.img_ef_dim * 16, self.img_ef_dim *
            16, 4, stride=1, padding=0, bias=True)
        self.linear_1 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_2 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_3 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_4 = nn.Linear(self.img_ef_dim * 16, self.z_dim, bias=True)
        nn.init.xavier_uniform_(self.conv_0.weight)
        nn.init.xavier_uniform_(self.conv_9.weight)
        nn.init.constant_(self.conv_9.bias, 0)
        nn.init.xavier_uniform_(self.conv_10.weight)
        nn.init.constant_(self.conv_10.bias, 0)
        nn.init.xavier_uniform_(self.linear_1.weight)
        nn.init.constant_(self.linear_1.bias, 0)
        nn.init.xavier_uniform_(self.linear_2.weight)
        nn.init.constant_(self.linear_2.bias, 0)
        nn.init.xavier_uniform_(self.linear_3.weight)
        nn.init.constant_(self.linear_3.bias, 0)
        nn.init.xavier_uniform_(self.linear_4.weight)
        nn.init.constant_(self.linear_4.bias, 0)

    def forward(self, view, is_training=False):
        layer_0 = self.conv_0(1 - view)
        layer_0 = F.leaky_relu(layer_0, negative_slope=0.01, inplace=True)
        layer_1 = self.res_1(layer_0, is_training=is_training)
        layer_2 = self.res_2(layer_1, is_training=is_training)
        layer_3 = self.res_3(layer_2, is_training=is_training)
        layer_4 = self.res_4(layer_3, is_training=is_training)
        layer_5 = self.res_5(layer_4, is_training=is_training)
        layer_6 = self.res_6(layer_5, is_training=is_training)
        layer_7 = self.res_7(layer_6, is_training=is_training)
        layer_8 = self.res_8(layer_7, is_training=is_training)
        layer_9 = self.conv_9(layer_8)
        layer_9 = F.leaky_relu(layer_9, negative_slope=0.01, inplace=True)
        layer_10 = self.conv_10(layer_9)
        layer_10 = layer_10.view(-1, self.img_ef_dim * 16)
        layer_10 = F.leaky_relu(layer_10, negative_slope=0.01, inplace=True)
        l1 = self.linear_1(layer_10)
        l1 = F.leaky_relu(l1, negative_slope=0.01, inplace=True)
        l2 = self.linear_2(l1)
        l2 = F.leaky_relu(l2, negative_slope=0.01, inplace=True)
        l3 = self.linear_3(l2)
        l3 = F.leaky_relu(l3, negative_slope=0.01, inplace=True)
        l4 = self.linear_4(l3)
        l4 = torch.sigmoid(l4)
        return l4


def get_inputs():
    return [torch.rand([4, 1, 128, 128])]


def get_init_inputs():
    return [[], {'img_ef_dim': 4, 'z_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tl.store(out_ptr0 + x0, tmp2, None)


@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused_add_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x0, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused_add_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x0, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_5(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused_add_leaky_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x0, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_7(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.01
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused_add_leaky_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + x0, None)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x0, tmp7, None)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused_sigmoid_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33) = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 128, 128), (16384, 16384, 128, 1))
    assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1))
    assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_7, (8, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1))
    assert_size_stride(primals_9, (8, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1))
    assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1))
    assert_size_stride(primals_12, (16, 8, 3, 3), (72, 9, 3, 1))
    assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_14, (16, 8, 1, 1), (8, 1, 1, 1))
    assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_17, (32, 16, 3, 3), (144, 9, 3, 1))
    assert_size_stride(primals_18, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_19, (32, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_20, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_21, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_22, (64, 32, 4, 4), (512, 16, 4, 1))
    assert_size_stride(primals_23, (64,), (1,))
    assert_size_stride(primals_24, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (64, 64), (64, 1))
    assert_size_stride(primals_27, (64,), (1,))
    assert_size_stride(primals_28, (64, 64), (64, 1))
    assert_size_stride(primals_29, (64,), (1,))
    assert_size_stride(primals_30, (64, 64), (64, 1))
    assert_size_stride(primals_31, (64,), (1,))
    assert_size_stride(primals_32, (4, 64), (64, 1))
    assert_size_stride(primals_33, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 128, 128), (16384, 16384, 128, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_rsub_0[grid(65536)](primals_1, buf0, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2),
            padding=(3, 3), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_leaky_relu_1[grid(65536)](buf2, 65536, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_leaky_relu_1[grid(65536)](buf4, 65536, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf6 = buf5
        del buf5
        triton_poi_fused_add_leaky_relu_2[grid(65536)](buf6, buf2, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf8 = buf7
        del buf7
        triton_poi_fused_leaky_relu_1[grid(65536)](buf8, 65536, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf10 = buf9
        del buf9
        triton_poi_fused_add_leaky_relu_2[grid(65536)](buf10, buf6, 65536,
            XBLOCK=512, num_warps=4, num_stages=1)
        buf11 = extern_kernels.convolution(buf10, primals_7, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 8, 32, 32), (8192, 1024, 32, 1))
        buf12 = buf11
        del buf11
        triton_poi_fused_leaky_relu_3[grid(32768)](buf12, 32768, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf13 = extern_kernels.convolution(buf12, primals_8, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 8, 32, 32), (8192, 1024, 32, 1))
        buf14 = extern_kernels.convolution(buf10, primals_9, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 8, 32, 32), (8192, 1024, 32, 1))
        buf15 = buf13
        del buf13
        triton_poi_fused_add_leaky_relu_4[grid(32768)](buf15, buf14, 32768,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf14
        buf16 = extern_kernels.convolution(buf15, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf16, (4, 8, 32, 32), (8192, 1024, 32, 1))
        buf17 = buf16
        del buf16
        triton_poi_fused_leaky_relu_3[grid(32768)](buf17, 32768, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf18 = extern_kernels.convolution(buf17, primals_11, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 8, 32, 32), (8192, 1024, 32, 1))
        buf19 = buf18
        del buf18
        triton_poi_fused_add_leaky_relu_4[grid(32768)](buf19, buf15, 32768,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf20 = extern_kernels.convolution(buf19, primals_12, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 16, 16, 16), (4096, 256, 16, 1))
        buf21 = buf20
        del buf20
        triton_poi_fused_leaky_relu_5[grid(16384)](buf21, 16384, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf22 = extern_kernels.convolution(buf21, primals_13, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf22, (4, 16, 16, 16), (4096, 256, 16, 1))
        buf23 = extern_kernels.convolution(buf19, primals_14, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 16, 16, 16), (4096, 256, 16, 1))
        buf24 = buf22
        del buf22
        triton_poi_fused_add_leaky_relu_6[grid(16384)](buf24, buf23, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf23
        buf25 = extern_kernels.convolution(buf24, primals_15, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 16, 16, 16), (4096, 256, 16, 1))
        buf26 = buf25
        del buf25
        triton_poi_fused_leaky_relu_5[grid(16384)](buf26, 16384, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf27 = extern_kernels.convolution(buf26, primals_16, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 16, 16, 16), (4096, 256, 16, 1))
        buf28 = buf27
        del buf27
        triton_poi_fused_add_leaky_relu_6[grid(16384)](buf28, buf24, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf29 = extern_kernels.convolution(buf28, primals_17, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 32, 8, 8), (2048, 64, 8, 1))
        buf30 = buf29
        del buf29
        triton_poi_fused_leaky_relu_7[grid(8192)](buf30, 8192, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf31 = extern_kernels.convolution(buf30, primals_18, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 32, 8, 8), (2048, 64, 8, 1))
        buf32 = extern_kernels.convolution(buf28, primals_19, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 32, 8, 8), (2048, 64, 8, 1))
        buf33 = buf31
        del buf31
        triton_poi_fused_add_leaky_relu_8[grid(8192)](buf33, buf32, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf32
        buf34 = extern_kernels.convolution(buf33, primals_20, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf34, (4, 32, 8, 8), (2048, 64, 8, 1))
        buf35 = buf34
        del buf34
        triton_poi_fused_leaky_relu_7[grid(8192)](buf35, 8192, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf36 = extern_kernels.convolution(buf35, primals_21, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 32, 8, 8), (2048, 64, 8, 1))
        buf37 = buf36
        del buf36
        triton_poi_fused_add_leaky_relu_8[grid(8192)](buf37, buf33, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf38 = extern_kernels.convolution(buf37, primals_22, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 64, 4, 4), (1024, 16, 4, 1))
        buf39 = buf38
        del buf38
        triton_poi_fused_convolution_leaky_relu_9[grid(4096)](buf39,
            primals_23, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_23
        buf40 = extern_kernels.convolution(buf39, primals_24, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 64, 1, 1), (64, 1, 1, 1))
        buf41 = reinterpret_tensor(buf40, (4, 64), (64, 1), 0)
        del buf40
        triton_poi_fused_leaky_relu_10[grid(256)](buf41, primals_25, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_25
        buf42 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf41, reinterpret_tensor(primals_26, (64, 64),
            (1, 64), 0), out=buf42)
        buf43 = buf42
        del buf42
        triton_poi_fused_leaky_relu_10[grid(256)](buf43, primals_27, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_27
        buf44 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf43, reinterpret_tensor(primals_28, (64, 64),
            (1, 64), 0), out=buf44)
        buf45 = buf44
        del buf44
        triton_poi_fused_leaky_relu_10[grid(256)](buf45, primals_29, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_29
        buf46 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(buf45, reinterpret_tensor(primals_30, (64, 64),
            (1, 64), 0), out=buf46)
        buf47 = buf46
        del buf46
        triton_poi_fused_leaky_relu_10[grid(256)](buf47, primals_31, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_31
        buf48 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf47, reinterpret_tensor(primals_32, (64, 4),
            (1, 64), 0), out=buf48)
        buf49 = buf48
        del buf48
        triton_poi_fused_sigmoid_11[grid(16)](buf49, primals_33, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_33
    return (buf49, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_24, buf0, buf2, buf4, buf6, buf8, buf10,
        buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30,
        buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49,
        primals_32, primals_30, primals_28, primals_26)


class resnet_block(nn.Module):

    def __init__(self, dim_in, dim_out):
        super(resnet_block, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        if self.dim_in == self.dim_out:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
        else:
            self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2,
                padding=1, bias=False)
            self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1,
                padding=1, bias=False)
            self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2,
                padding=0, bias=False)
            nn.init.xavier_uniform_(self.conv_1.weight)
            nn.init.xavier_uniform_(self.conv_2.weight)
            nn.init.xavier_uniform_(self.conv_s.weight)

    def forward(self, input, is_training=False):
        if self.dim_in == self.dim_out:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            output = output + input
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        else:
            output = self.conv_1(input)
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
            output = self.conv_2(output)
            input_ = self.conv_s(input)
            output = output + input_
            output = F.leaky_relu(output, negative_slope=0.01, inplace=True)
        return output


class img_encoderNew(nn.Module):

    def __init__(self, img_ef_dim, z_dim):
        super(img_encoderNew, self).__init__()
        self.img_ef_dim = img_ef_dim
        self.z_dim = z_dim
        self.conv_0 = nn.Conv2d(1, self.img_ef_dim, 7, stride=2, padding=3,
            bias=False)
        self.res_1 = resnet_block(self.img_ef_dim, self.img_ef_dim)
        self.res_2 = resnet_block(self.img_ef_dim, self.img_ef_dim)
        self.res_3 = resnet_block(self.img_ef_dim, self.img_ef_dim * 2)
        self.res_4 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 2)
        self.res_5 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 4)
        self.res_6 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 4)
        self.res_7 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 8)
        self.res_8 = resnet_block(self.img_ef_dim * 8, self.img_ef_dim * 8)
        self.conv_9 = nn.Conv2d(self.img_ef_dim * 8, self.img_ef_dim * 16,
            4, stride=2, padding=1, bias=True)
        self.conv_10 = nn.Conv2d(self.img_ef_dim * 16, self.img_ef_dim *
            16, 4, stride=1, padding=0, bias=True)
        self.linear_1 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_2 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_3 = nn.Linear(self.img_ef_dim * 16, self.img_ef_dim *
            16, bias=True)
        self.linear_4 = nn.Linear(self.img_ef_dim * 16, self.z_dim, bias=True)
        nn.init.xavier_uniform_(self.conv_0.weight)
        nn.init.xavier_uniform_(self.conv_9.weight)
        nn.init.constant_(self.conv_9.bias, 0)
        nn.init.xavier_uniform_(self.conv_10.weight)
        nn.init.constant_(self.conv_10.bias, 0)
        nn.init.xavier_uniform_(self.linear_1.weight)
        nn.init.constant_(self.linear_1.bias, 0)
        nn.init.xavier_uniform_(self.linear_2.weight)
        nn.init.constant_(self.linear_2.bias, 0)
        nn.init.xavier_uniform_(self.linear_3.weight)
        nn.init.constant_(self.linear_3.bias, 0)
        nn.init.xavier_uniform_(self.linear_4.weight)
        nn.init.constant_(self.linear_4.bias, 0)

    def forward(self, input_0):
        primals_2 = self.conv_0.weight
        primals_3 = self.res_1.conv_1.weight
        primals_4 = self.res_1.conv_2.weight
        primals_5 = self.res_2.conv_1.weight
        primals_6 = self.res_2.conv_2.weight
        primals_7 = self.res_3.conv_1.weight
        primals_8 = self.res_3.conv_2.weight
        primals_9 = self.res_3.conv_s.weight
        primals_10 = self.res_4.conv_1.weight
        primals_11 = self.res_4.conv_2.weight
        primals_12 = self.res_5.conv_1.weight
        primals_13 = self.res_5.conv_2.weight
        primals_14 = self.res_5.conv_s.weight
        primals_15 = self.res_6.conv_1.weight
        primals_16 = self.res_6.conv_2.weight
        primals_17 = self.res_7.conv_1.weight
        primals_18 = self.res_7.conv_2.weight
        primals_19 = self.res_7.conv_s.weight
        primals_20 = self.res_8.conv_1.weight
        primals_21 = self.res_8.conv_2.weight
        primals_22 = self.conv_9.weight
        primals_23 = self.conv_9.bias
        primals_24 = self.conv_10.weight
        primals_25 = self.conv_10.bias
        primals_26 = self.linear_1.weight
        primals_27 = self.linear_1.bias
        primals_28 = self.linear_2.weight
        primals_29 = self.linear_2.bias
        primals_30 = self.linear_3.weight
        primals_31 = self.linear_3.bias
        primals_32 = self.linear_4.weight
        primals_33 = self.linear_4.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31, primals_32, primals_33])
        return output[0]
luixiao1223/BSP-NET-pytorch
img_encoder
false
3976
[ "MIT" ]
0
f871c8ce6a9d52ac922e110702c47cd1c89d0a73
https://github.com/luixiao1223/BSP-NET-pytorch/tree/f871c8ce6a9d52ac922e110702c47cd1c89d0a73
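A minimal usage sketch for the img_encoder entry above (shapes follow its get_inputs/get_init_inputs helpers; this assumes the eager PyTorch class rather than the compiled img_encoderNew, so no CUDA device is required):

import torch

# Sketch, not part of the dataset row: run the eager encoder on random views.
encoder = img_encoder(img_ef_dim=4, z_dim=4)
view = torch.rand([4, 1, 128, 128])  # batch of four single-channel 128x128 views
z = encoder(view)  # conv stack downsamples 128 -> 1, then four linear layers + sigmoid
print(z.shape)  # torch.Size([4, 4]), i.e. [batch, z_dim]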
ClusterDistance
import torch
from torch import nn
from typing import Optional


class ClusterDistance(nn.Module):

    def __init__(self, n_classes: 'int', enc_shape: 'int', cluster_centers:
        'Optional[torch.Tensor]'=None) ->None:
        """
        :param n_classes: number of clusters
        :param enc_shape: embedding dimension of feature vectors
        :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform
        """
        super().__init__()
        self.enc_shape = enc_shape
        self.n_classes = n_classes
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.n_classes, self.
                enc_shape, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = nn.Parameter(initial_cluster_centers)

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        """
        :param x: FloatTensor of [batch size, embedding dimension]
        :param y: FloatTensor of [batch size,]
        :return: FloatTensor [batch size, number of clusters]
        """
        return torch.cdist(x, self.cluster_centers)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_classes': 4, 'enc_shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from typing import Optional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten._cdist_forward.default(primals_2, primals_1,
            2.0, None)
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
            YBLOCK=32, num_warps=4, num_stages=1)
    return buf1, primals_1, primals_2, buf2


class ClusterDistanceNew(nn.Module):

    def __init__(self, n_classes: 'int', enc_shape: 'int', cluster_centers:
        'Optional[torch.Tensor]'=None) ->None:
        """
        :param n_classes: number of clusters
        :param enc_shape: embedding dimension of feature vectors
        :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform
        """
        super().__init__()
        self.enc_shape = enc_shape
        self.n_classes = n_classes
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.n_classes, self.
                enc_shape, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = nn.Parameter(initial_cluster_centers)

    def forward(self, input_0):
        primals_1 = self.cluster_centers
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
marx-alex/Morphelia
ClusterDistance
false
3977
[ "MIT" ]
0
809278b07f1a535789455d54df3cbddc850d609c
https://github.com/marx-alex/Morphelia/tree/809278b07f1a535789455d54df3cbddc850d609c
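For the ClusterDistance entry above, torch.cdist with its default p=2 returns plain Euclidean distances to the learnable centers; a small equivalence sketch (assuming the eager class and a 2-D feature batch, which is the documented input shape):

import torch

m = ClusterDistance(n_classes=4, enc_shape=4)
x = torch.rand([8, 4])  # [batch size, embedding dimension]
d = m(x)  # [batch size, n_classes] distances to each center
ref = ((x[:, None, :] - m.cluster_centers[None]) ** 2).sum(-1).sqrt()
assert d.shape == (8, 4) and torch.allclose(d, ref, atol=1e-06)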
Get_gradient_nopadding
import torch
import torch.nn as nn
import torch.nn.functional as F


class Get_gradient_nopadding(nn.Module):

    def __init__(self):
        super(Get_gradient_nopadding, self).__init__()
        kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
        kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
        kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
        kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
        self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
        self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)

    def forward(self, x):
        x_list = []
        for i in range(x.shape[1]):
            x_i = x[:, i]
            x_i_v = F.conv2d(x_i.unsqueeze(1), self.weight_v, padding=1)
            x_i_h = F.conv2d(x_i.unsqueeze(1), self.weight_h, padding=1)
            x_i = torch.sqrt(torch.pow(x_i_v, 2) + torch.pow(x_i_h, 2) + 1e-06)
            x_list.append(x_i)
        x = torch.cat(x_list, dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x0 = xindex % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp5 * tmp5
    tmp7 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp10 = 1e-06
    tmp11 = tmp9 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
    tmp14 = tl.where(tmp4, tmp12, tmp13)
    tmp15 = tmp0 >= tmp3
    tmp16 = tl.full([1], 2, tl.int64)
    tmp17 = tmp0 < tmp16
    tmp18 = tmp15 & tmp17
    tmp19 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp18 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp20 = tmp19 * tmp19
    tmp21 = tl.load(in_ptr3 + (x0 + 16 * x2), tmp18 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = tmp23 + tmp10
    tmp25 = libdevice.sqrt(tmp24)
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp18, tmp25, tmp26)
    tmp28 = tmp0 >= tmp16
    tmp29 = tl.full([1], 3, tl.int64)
    tmp30 = tmp0 < tmp29
    tmp31 = tmp28 & tmp30
    tmp32 = tl.load(in_ptr4 + (x0 + 16 * x2), tmp31 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp33 = tmp32 * tmp32
    tmp34 = tl.load(in_ptr5 + (x0 + 16 * x2), tmp31 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp36 + tmp10
    tmp38 = libdevice.sqrt(tmp37)
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp31, tmp38, tmp39)
    tmp41 = tmp0 >= tmp29
    tl.full([1], 4, tl.int64)
    tmp44 = tl.load(in_ptr6 + (x0 + 16 * x2), tmp41 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp45 = tmp44 * tmp44
    tmp46 = tl.load(in_ptr7 + (x0 + 16 * x2), tmp41 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp47 = tmp46 * tmp46
    tmp48 = tmp45 + tmp47
    tmp49 = tmp48 + tmp10
    tmp50 = libdevice.sqrt(tmp49)
    tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
    tmp52 = tl.where(tmp41, tmp50, tmp51)
    tmp53 = tl.where(tmp31, tmp40, tmp52)
    tmp54 = tl.where(tmp18, tmp27, tmp53)
    tmp55 = tl.where(tmp4, tmp14, tmp54)
    tl.store(out_ptr0 + x3, tmp55, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(1, 1),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(1, 1),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
        buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
        buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
        buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
        buf6 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 48), arg1_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
        del arg1_1
        buf7 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 48), arg2_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
        del arg0_1
        del arg2_1
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](buf0, buf1, buf2, buf3, buf4,
            buf5, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del buf1
        del buf2
        del buf3
        del buf4
        del buf5
        del buf6
        del buf7
    return buf8,


class Get_gradient_nopaddingNew(nn.Module):

    def __init__(self):
        super(Get_gradient_nopaddingNew, self).__init__()
        kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
        kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
        kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
        kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
        self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
        self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)

    def forward(self, input_0):
        arg1_1 = self.weight_h
        arg2_1 = self.weight_v
        arg0_1 = input_0
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
mansum6/ESRGAN
Get_gradient_nopadding
false
3978
[ "Apache-2.0" ]
0
8a6b2ce20600840490ee0525cb105617b8e85c73
https://github.com/mansum6/ESRGAN/tree/8a6b2ce20600840490ee0525cb105617b8e85c73
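The Get_gradient_nopadding entry above convolves each channel with 3x3 central-difference kernels and returns the gradient magnitude sqrt(g_v**2 + g_h**2 + 1e-06); a quick shape and semantics check (assuming the eager class):

import torch

m = Get_gradient_nopadding()
x = torch.rand([2, 3, 8, 8])
g = m(x)
assert g.shape == x.shape  # one magnitude map per input channel
# At interior pixels the kernels are central differences, so
# g[..., i, j] = sqrt((x[..., i + 1, j] - x[..., i - 1, j]) ** 2
#     + (x[..., i, j + 1] - x[..., i, j - 1]) ** 2 + 1e-06)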
ClusterAssignment
import torch
from torch import nn
from typing import Optional


class ClusterAssignment(nn.Module):

    def __init__(self, n_classes: 'int', enc_shape: 'int', alpha: 'float'=
        1.0, cluster_centers: 'Optional[torch.Tensor]'=None) ->None:
        """
        Module to handle the soft assignment, for a description see in 3.1.1. in Xie/Girshick/Farhadi,
        where the Student's t-distribution is used to measure similarity between feature vector and each
        cluster centroid.

        :param n_classes: number of clusters
        :param enc_shape: embedding dimension of feature vectors
        :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
        :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform
        """
        super().__init__()
        self.enc_shape = enc_shape
        self.n_classes = n_classes
        self.alpha = alpha
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.n_classes, self.
                enc_shape, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = nn.Parameter(initial_cluster_centers)

    def forward(self, batch: 'torch.Tensor') ->torch.Tensor:
        """
        Compute the soft assignment for a batch of feature vectors, returning a batch of assignments
        for each cluster.

        :param batch: FloatTensor of [batch size, embedding dimension]
        :return: FloatTensor [batch size, number of clusters]
        """
        norm_squared = torch.sum((batch.unsqueeze(1) - self.cluster_centers
            ) ** 2, 2)
        numerator = 1.0 / (1.0 + norm_squared / self.alpha)
        power = float(self.alpha + 1) / 2
        numerator = numerator ** power
        return numerator / torch.sum(numerator, dim=1, keepdim=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_classes': 4, 'enc_shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from typing import Optional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 - tmp1
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 - tmp1
    tmp10 = tmp9 * tmp9
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 - tmp1
    tmp14 = tmp13 * tmp13
    tmp15 = tmp11 + tmp14
    tmp16 = 1.0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp17 + tmp16
    tmp19 = tl.full([1], 1, tl.int32)
    tmp20 = tmp19 / tmp18
    tmp21 = tmp20 * tmp16
    tmp22 = tmp21 / tmp21
    tl.store(in_out_ptr0 + x2, tmp22, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0[grid(64)](buf1,
            primals_1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf1, primals_1, primals_2


class ClusterAssignmentNew(nn.Module):

    def __init__(self, n_classes: 'int', enc_shape: 'int', alpha: 'float'=
        1.0, cluster_centers: 'Optional[torch.Tensor]'=None) ->None:
        """
        Module to handle the soft assignment, for a description see in 3.1.1. in Xie/Girshick/Farhadi,
        where the Student's t-distribution is used to measure similarity between feature vector and each
        cluster centroid.

        :param n_classes: number of clusters
        :param enc_shape: embedding dimension of feature vectors
        :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
        :param cluster_centers: clusters centers to initialise, if None then use Xavier uniform
        """
        super().__init__()
        self.enc_shape = enc_shape
        self.n_classes = n_classes
        self.alpha = alpha
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.n_classes, self.
                enc_shape, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = nn.Parameter(initial_cluster_centers)

    def forward(self, input_0):
        primals_2 = self.cluster_centers
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
marx-alex/Morphelia
ClusterAssignment
false
3979
[ "MIT" ]
0
809278b07f1a535789455d54df3cbddc850d609c
https://github.com/marx-alex/Morphelia/tree/809278b07f1a535789455d54df3cbddc850d609c
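The ClusterAssignment forward above is the DEC-style soft assignment q_ij = (1 + ||x_i - mu_j||^2 / alpha)^(-(alpha + 1) / 2), renormalised over clusters; a sketch verifying each row is a distribution (assuming the eager class with the documented 2-D input):

import torch

m = ClusterAssignment(n_classes=4, enc_shape=4)  # alpha defaults to 1.0
x = torch.rand([8, 4])
q = m(x)
assert q.shape == (8, 4)
assert torch.allclose(q.sum(dim=1), torch.ones(8), atol=1e-06)  # rows sum to 1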
BinaryReg
import torch
import torch.utils.data
import torch.nn as nn


class BinaryReg(nn.Module):
    """Regularization for encouraging the outputs to be binary.
    """

    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, pred):
        diff = pred - 0.5
        diff = torch.clamp(torch.abs(diff), min=0.01)
        loss = (1.0 / diff).mean()
        return self.alpha * loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0(in_out_ptr0,
    in_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 0.5
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 0.01
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tl.full([1], 1, tl.int32)
    tmp7 = tmp6 / tmp5
    tmp8 = 1.0
    tmp9 = tmp7 * tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 256.0
    tmp14 = tmp12 / tmp13
    tmp15 = 0.1
    tmp16 = tmp14 * tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0[grid(1)](buf1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class BinaryRegNew(nn.Module):
    """Regularization for encouraging the outputs to be binary.
    """

    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
matinraayai/pytorch_connectomics
BinaryReg
false
3980
[ "MIT" ]
0
b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
https://github.com/matinraayai/pytorch_connectomics/tree/b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
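The BinaryReg entry above computes alpha * mean(1 / clamp(|pred - 0.5|, min=0.01)), so the penalty peaks at 0.5 and bottoms out at 0 or 1; two spot checks (assuming the eager class with the default alpha=0.1):

import torch

reg = BinaryReg(alpha=0.1)
print(reg(torch.full((2, 2), 0.5)))  # 0.1 * (1 / 0.01) = 10.0, the maximum
print(reg(torch.ones(2, 2)))  # 0.1 * (1 / 0.5) = 0.2, the minimum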
Triaffine
import torch
import torch.nn as nn


class Triaffine(nn.Module):
    """
    Triaffine layer for second-order scoring.

    This function has a tensor of weights `W` and bias terms if needed.
    The score `s(x, y, z)` of the vector triple `(x, y, z)` is computed as `x^T z^T W y`.
    Usually, `x` and `y` can be concatenated with bias terms.

    References:
        - Yu Zhang, Zhenghua Li and Min Zhang (ACL'20)
          Efficient Second-Order TreeCRF for Neural Dependency Parsing
          https://www.aclweb.org/anthology/2020.acl-main.302/
        - Xinyu Wang, Jingxian Huang, and Kewei Tu (ACL'19)
          Second-Order Semantic Dependency Parsing with End-to-End Neural Networks
          https://www.aclweb.org/anthology/P19-1454/

    Args:
        n_in (int): The dimension of the input feature.
        bias_x (bool): If True, add a bias term for tensor x. Default: False.
        bias_y (bool): If True, add a bias term for tensor y. Default: False.
    """

    def __init__(self, n_in, bias_x=False, bias_y=False):
        super().__init__()
        self.n_in = n_in
        self.bias_x = bias_x
        self.bias_y = bias_y
        self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
            bias_y))
        self.reset_parameters()

    def extra_repr(self):
        s = f'n_in={self.n_in}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def reset_parameters(self):
        nn.init.zeros_(self.weight)

    def forward(self, x, y, z):
        """
        Args:
            x (torch.Tensor): [batch_size, seq_len, n_in]
            y (torch.Tensor): [batch_size, seq_len, n_in]
            z (torch.Tensor): [batch_size, seq_len, n_in]

        Returns:
            s (torch.Tensor): [batch_size, seq_len, seq_len, seq_len]
        """
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        w = torch.einsum('bzk,ikj->bzij', z, self.weight)
        s = torch.einsum('bxi,bzij,byj->bzxy', x, w, y)
        return s


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
        ]


def get_init_inputs():
    return [[], {'n_in': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 4
    x3 = xindex // 4
    y0 = yindex % 4
    y1 = yindex // 4
    x5 = xindex
    y4 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
    tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
            float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64,
            4, 1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
            out=buf1)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
            torch.float32)
        triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
        del buf1
        extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
            (64, 16, 1), 0), out=buf3)
        buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1, 1), (64, 16, 4, 1,
            1, 1), 0)
        del buf2
        triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
            16, YBLOCK=16, num_warps=4, num_stages=1)
        buf5 = buf3
        del buf3
        extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
            (64, 16, 1), 0), out=buf5)
        del buf4
    return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0
        ), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)


class TriaffineNew(nn.Module):
    """
    Triaffine layer for second-order scoring.

    This function has a tensor of weights `W` and bias terms if needed.
    The score `s(x, y, z)` of the vector triple `(x, y, z)` is computed as `x^T z^T W y`.
    Usually, `x` and `y` can be concatenated with bias terms.

    References:
        - Yu Zhang, Zhenghua Li and Min Zhang (ACL'20)
          Efficient Second-Order TreeCRF for Neural Dependency Parsing
          https://www.aclweb.org/anthology/2020.acl-main.302/
        - Xinyu Wang, Jingxian Huang, and Kewei Tu (ACL'19)
          Second-Order Semantic Dependency Parsing with End-to-End Neural Networks
          https://www.aclweb.org/anthology/P19-1454/

    Args:
        n_in (int): The dimension of the input feature.
        bias_x (bool): If True, add a bias term for tensor x. Default: False.
        bias_y (bool): If True, add a bias term for tensor y. Default: False.
    """

    def __init__(self, n_in, bias_x=False, bias_y=False):
        super().__init__()
        self.n_in = n_in
        self.bias_x = bias_x
        self.bias_y = bias_y
        self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
            bias_y))
        self.reset_parameters()

    def extra_repr(self):
        s = f'n_in={self.n_in}'
        if self.bias_x:
            s += f', bias_x={self.bias_x}'
        if self.bias_y:
            s += f', bias_y={self.bias_y}'
        return s

    def reset_parameters(self):
        nn.init.zeros_(self.weight)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.weight
        primals_2 = input_0
        primals_3 = input_1
        primals_4 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
matejklemen/morphological-dependency-parsing
Triaffine
false
3981
[ "MIT" ]
0
2ab24b8621debe6e3288ade01c9604a06f9bd453
https://github.com/matejklemen/morphological-dependency-parsing/tree/2ab24b8621debe6e3288ade01c9604a06f9bd453
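The two einsums in Triaffine.forward above factor the triaffine score s[b, z, x, y] = sum over i, k, j of x[b, x, i] * z[b, z, k] * W[i, k, j] * y[b, y, j]; a brute-force equivalence sketch (assuming the eager class, with the zero-initialised weight refilled so the check is non-trivial):

import torch

m = Triaffine(n_in=4)
torch.nn.init.normal_(m.weight)
x, y, z = (torch.rand([2, 3, 4]) for _ in range(3))
s = m(x, y, z)
ref = torch.einsum('bxi,bzk,ikj,byj->bzxy', x, z, m.weight, y)
assert s.shape == (2, 3, 3, 3) and torch.allclose(s, ref, atol=1e-05)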
DiceLoss
import torch
import torch.utils.data
import torch.nn as nn


class DiceLoss(nn.Module):
    """DICE loss.
    """

    def __init__(self, size_average=True, reduce=True, smooth=100.0, power=1):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        self.reduce = reduce
        self.power = power

    def dice_loss(self, pred, target):
        loss = 0.0
        for index in range(pred.size()[0]):
            iflat = pred[index].view(-1)
            tflat = target[index].view(-1)
            intersection = (iflat * tflat).sum()
            if self.power == 1:
                loss += 1 - (2.0 * intersection + self.smooth) / (iflat.
                    sum() + tflat.sum() + self.smooth)
            else:
                loss += 1 - (2.0 * intersection + self.smooth) / ((iflat **
                    self.power).sum() + (tflat ** self.power).sum() + self.
                    smooth)
        return loss / float(pred.size()[0])

    def dice_loss_batch(self, pred, target):
        iflat = pred.view(-1)
        tflat = target.view(-1)
        intersection = (iflat * tflat).sum()
        if self.power == 1:
            loss = 1 - (2.0 * intersection + self.smooth) / (iflat.sum() +
                tflat.sum() + self.smooth)
        else:
            loss = 1 - (2.0 * intersection + self.smooth) / ((iflat ** self
                .power).sum() + (tflat ** self.power).sum() + self.smooth)
        return loss

    def forward(self, pred, target):
        if not target.size() == pred.size():
            raise ValueError(
                'Target size ({}) must be the same as pred size ({})'.
                format(target.size(), pred.size()))
        if self.reduce:
            loss = self.dice_loss(pred, target)
        else:
            loss = self.dice_loss_batch(pred, target)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr1, in_ptr0, in_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp12 = tl.load(in_ptr0 + (64 + r0), None)
    tmp13 = tl.load(in_ptr1 + (64 + r0), None)
    tmp24 = tl.load(in_ptr0 + (192 + r0), None)
    tmp25 = tl.load(in_ptr1 + (192 + r0), None)
    tmp36 = tl.load(in_ptr0 + (128 + r0), None)
    tmp37 = tl.load(in_ptr1 + (128 + r0), None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp23 = tl.sum(tmp21, 1)[:, None]
    tmp26 = tmp24 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.sum(tmp27, 1)[:, None]
    tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp32 = tl.sum(tmp30, 1)[:, None]
    tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp35 = tl.sum(tmp33, 1)[:, None]
    tmp38 = tmp36 * tmp37
    tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
    tmp41 = tl.sum(tmp39, 1)[:, None]
    tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp44 = tl.sum(tmp42, 1)[:, None]
    tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp47 = tl.sum(tmp45, 1)[:, None]
    tmp48 = 2.0
    tmp49 = tmp5 * tmp48
    tmp50 = 100.0
    tmp51 = tmp49 + tmp50
    tmp52 = tmp8 + tmp11
    tmp53 = tmp52 + tmp50
    tmp54 = tmp51 / tmp53
    tmp55 = 1.0
    tmp56 = tmp55 - tmp54
    tmp57 = 0.0
    tmp58 = tmp56 + tmp57
    tmp59 = tmp17 * tmp48
    tmp60 = tmp59 + tmp50
    tmp61 = tmp20 + tmp23
    tmp62 = tmp61 + tmp50
    tmp63 = tmp60 / tmp62
    tmp64 = tmp55 - tmp63
    tmp65 = tmp58 + tmp64
    tmp66 = tmp41 * tmp48
    tmp67 = tmp66 + tmp50
    tmp68 = tmp44 + tmp47
    tmp69 = tmp68 + tmp50
    tmp70 = tmp67 / tmp69
    tmp71 = tmp55 - tmp70
    tmp72 = tmp65 + tmp71
    tmp73 = tmp29 * tmp48
    tmp74 = tmp73 + tmp50
    tmp75 = tmp32 + tmp35
    tmp76 = tmp75 + tmp50
    tmp77 = tmp74 / tmp76
    tmp78 = tmp55 - tmp77
    tmp79 = tmp72 + tmp78
    tmp80 = 0.25
    tmp81 = tmp79 * tmp80
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp81, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf10 = empty_strided_cuda((), (), torch.float32)
        buf13 = buf10
        del buf10
        get_raw_stream(0)
        triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf13, arg1_1,
            arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf13,


class DiceLossNew(nn.Module):
    """DICE loss.
    """

    def __init__(self, size_average=True, reduce=True, smooth=100.0, power=1):
        super(DiceLossNew, self).__init__()
        self.smooth = smooth
        self.reduce = reduce
        self.power = power

    def dice_loss(self, pred, target):
        loss = 0.0
        for index in range(pred.size()[0]):
            iflat = pred[index].view(-1)
            tflat = target[index].view(-1)
            intersection = (iflat * tflat).sum()
            if self.power == 1:
                loss += 1 - (2.0 * intersection + self.smooth) / (iflat.
                    sum() + tflat.sum() + self.smooth)
            else:
                loss += 1 - (2.0 * intersection + self.smooth) / ((iflat **
                    self.power).sum() + (tflat ** self.power).sum() + self.
                    smooth)
        return loss / float(pred.size()[0])

    def dice_loss_batch(self, pred, target):
        iflat = pred.view(-1)
        tflat = target.view(-1)
        intersection = (iflat * tflat).sum()
        if self.power == 1:
            loss = 1 - (2.0 * intersection + self.smooth) / (iflat.sum() +
                tflat.sum() + self.smooth)
        else:
            loss = 1 - (2.0 * intersection + self.smooth) / ((iflat ** self
                .power).sum() + (tflat ** self.power).sum() + self.smooth)
        return loss

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
matinraayai/pytorch_connectomics
DiceLoss
false
3982
[ "MIT" ]
0
b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
https://github.com/matinraayai/pytorch_connectomics/tree/b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
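With reduce=True (the default), the DiceLoss entry above averages 1 - (2 * sum(pred * target) + smooth) / (sum(pred) + sum(target) + smooth) over the samples in the batch; a minimal sketch (assuming the eager class):

import torch

criterion = DiceLoss(smooth=100.0)
target = (torch.rand([4, 1, 8, 8]) > 0.5).float()
print(criterion(target, target))  # exactly 0 when pred equals a binary target
print(criterion(1 - target, target))  # larger as pred and target disagree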
Mish
import torch
import torch.nn as nn


class Mish(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.mul(x, torch.tanh(torch.log(1 + torch.exp(x))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_exp_log_mul_tanh_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tl_math.log(tmp3)
    tmp5 = libdevice.tanh(tmp4)
    tmp6 = tmp0 * tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_exp_log_mul_tanh_0[grid(256)](arg0_1, buf0,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MishNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
mattroz/yatopi
Mish
false
3,983
[ "MIT" ]
0
278bac6f3d2f13916ae9d43309b9f38b608426bd
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
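The fused elementwise kernel above computes x * tanh(log(1 + exp(x))), that is, Mish spelled out through an explicit softplus. A quick CPU check (illustration only, not part of the record) that this composition matches the numerically stabler F.softplus form:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
fused_math = x * torch.tanh(torch.log(1 + torch.exp(x)))  # the kernel's formula
ref = x * torch.tanh(F.softplus(x))                       # stable softplus variant
print(torch.allclose(fused_math, ref, atol=1e-6))         # True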
PatchEmbed3D
import torch import torch.nn as nn import torch.nn.functional as F class PatchEmbed3D(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): """Forward function.""" _, _, D, H, W = x.size() if W % self.patch_size[2] != 0: x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) if H % self.patch_size[1] != 0: x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]) ) if D % self.patch_size[0] != 0: x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self. patch_size[0])) x = self.proj(x) if self.norm is not None: D, Wh, Ww = x.size(2), x.size(3), x.size(4) x = x.flatten(2).transpose(1, 2) x = self.norm(x) x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) return x def get_inputs(): return [torch.rand([4, 3, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 8192 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 3, 2, 4, 4), (96, 32, 16, 4, 1)) assert_size_stride(primals_3, (96,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 32, 16, 16), (786432, 8192, 256, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(3145728)](buf1, primals_3, 3145728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2 class PatchEmbed3DNew(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
luohwu/video-swin-transformer-pytorch
PatchEmbed3D
false
3,984
[ "MIT" ]
0
ad96877a6db44436183a03e5b9a80c425726c982
https://github.com/luohwu/video-swin-transformer-pytorch/tree/ad96877a6db44436183a03e5b9a80c425726c982
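Because the (4, 3, 64, 64, 64) test input divides evenly by patch_size=(2, 4, 4), all three F.pad branches are dead at trace time, so the compiled graph reduces to a single Conv3d plus the fused bias add above. A shape sketch in plain PyTorch (illustration only) confirming the grid size:

import torch
import torch.nn as nn

proj = nn.Conv3d(3, 96, kernel_size=(2, 4, 4), stride=(2, 4, 4))
out = proj(torch.rand(4, 3, 64, 64, 64))
# (4, 96, 32, 16, 16): 4 * 96 * 32 * 16 * 16 = 3145728 elements, matching the
# grid of triton_poi_fused_convolution_0 (one program element per bias add).
print(out.shape, out.numel())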
JaccardLoss
import torch import torch.utils.data import torch.nn as nn from abc import ABC class JaccardLoss(nn.Module, ABC): """Jaccard loss. """ def __init__(self, size_average=True, reduce=True, smooth=1.0): super(JaccardLoss, self).__init__() self.smooth = smooth self.reduce = reduce def jaccard_loss(self, pred, target): loss = 0.0 for index in range(pred.size()[0]): iflat = pred[index].view(-1) tflat = target[index].view(-1) intersection = (iflat * tflat).sum() loss += 1 - (intersection + self.smooth) / (iflat.sum() + tflat .sum() - intersection + self.smooth) return loss / float(pred.size()[0]) def jaccard_loss_batch(self, pred, target): iflat = pred.view(-1) tflat = target.view(-1) intersection = (iflat * tflat).sum() loss = 1 - (intersection + self.smooth) / (iflat.sum() + tflat.sum( ) - intersection + self.smooth) return loss def forward(self, pred, target): if not target.size() == pred.size(): raise ValueError( 'Target size ({}) must be the same as pred size ({})'. format(target.size(), pred.size())) if self.reduce: loss = self.jaccard_loss(pred, target) else: loss = self.jaccard_loss_batch(pred, target) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn from abc import ABC assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (192 + r0), None) tmp25 = tl.load(in_ptr1 + (192 + r0), None) tmp36 = tl.load(in_ptr0 + (128 + r0), None) tmp37 = tl.load(in_ptr1 + (128 + r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 1.0 tmp49 = tmp5 + tmp48 tmp50 = tmp8 + tmp11 tmp51 = tmp50 - tmp5 tmp52 = tmp51 + tmp48 tmp53 = tmp49 / tmp52 tmp54 = tmp48 - tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 + tmp48 tmp58 = tmp20 + tmp23 tmp59 = tmp58 - tmp17 tmp60 = tmp59 + tmp48 tmp61 = tmp57 / tmp60 tmp62 = tmp48 - tmp61 tmp63 = tmp56 + tmp62 tmp64 = tmp41 + tmp48 tmp65 = tmp44 + tmp47 tmp66 = tmp65 - tmp41 tmp67 = tmp66 + tmp48 tmp68 = tmp64 / tmp67 tmp69 = tmp48 - tmp68 tmp70 = tmp63 + tmp69 tmp71 = tmp29 + tmp48 tmp72 = tmp32 + tmp35 tmp73 = tmp72 - tmp29 tmp74 = tmp73 + tmp48 tmp75 = tmp71 / tmp74 tmp76 = tmp48 - tmp75 tmp77 = tmp70 + tmp76 tmp78 = 0.25 tmp79 = tmp77 * tmp78 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp79, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf10 = empty_strided_cuda((), (), torch.float32) buf13 = buf10 del buf10 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sub_sum_0[grid(1)](buf13, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf13, class JaccardLossNew(nn.Module, ABC): """Jaccard loss. 
""" def __init__(self, size_average=True, reduce=True, smooth=1.0): super(JaccardLossNew, self).__init__() self.smooth = smooth self.reduce = reduce def jaccard_loss(self, pred, target): loss = 0.0 for index in range(pred.size()[0]): iflat = pred[index].view(-1) tflat = target[index].view(-1) intersection = (iflat * tflat).sum() loss += 1 - (intersection + self.smooth) / (iflat.sum() + tflat .sum() - intersection + self.smooth) return loss / float(pred.size()[0]) def jaccard_loss_batch(self, pred, target): iflat = pred.view(-1) tflat = target.view(-1) intersection = (iflat * tflat).sum() loss = 1 - (intersection + self.smooth) / (iflat.sum() + tflat.sum( ) - intersection + self.smooth) return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
matinraayai/pytorch_connectomics
JaccardLoss
false
3,985
[ "MIT" ]
0
b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
https://github.com/matinraayai/pytorch_connectomics/tree/b11a2f7e71a8d1442fb05f7a6edfaaaa7b0d9205
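As with the Dice record above, the whole batched Jaccard reduction (reduce=True, smooth=1.0) becomes one persistent-reduction kernel. An eager reference mirroring its arithmetic, included as a verification sketch rather than part of the record:

import torch

def jaccard_ref(pred, target, smooth=1.0):
    # Per-sample IoU loss; the final mean corresponds to the kernel's "* 0.25".
    loss = 0.0
    for i in range(pred.size(0)):
        p, t = pred[i].reshape(-1), target[i].reshape(-1)
        inter = (p * t).sum()
        loss = loss + 1 - (inter + smooth) / (p.sum() + t.sum() - inter + smooth)
    return loss / pred.size(0)

pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(jaccard_ref(pred, target))  # should match JaccardLossNew()(pred, target) on CUDA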
Network
import torch import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.nn.functional as F class Network(nn.Module): def __init__(self): super(Network, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 2) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = 
tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (2, 84), (84, 1)) assert_size_stride(primals_11, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del 
buf8 triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 2), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetworkNew(nn.Module): def __init__(self): super(NetworkNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 2) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
markosej11/Image-Claasification
Network
false
3,986
[ "MIT" ]
0
0fea42726f36b582829a44e6fcebf8af89b518fc
https://github.com/markosej11/Image-Claasification/tree/0fea42726f36b582829a44e6fcebf8af89b518fc
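The buffer shapes asserted in call follow the classic LeNet-style size arithmetic for a 32x32 input. A shape walk-through in plain PyTorch (illustration only; the conv layers here are freshly initialized, so only the shapes are meaningful):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.rand(4, 3, 32, 32)
x = F.max_pool2d(F.relu(nn.Conv2d(3, 6, 5)(x)), 2)   # 32 -> 28 -> 14, cf. buf2
x = F.max_pool2d(F.relu(nn.Conv2d(6, 16, 5)(x)), 2)  # 14 -> 10 -> 5,  cf. buf7
print(x.reshape(4, -1).shape)  # (4, 400): the 16*5*5 fc1 input fed to extern_kernels.mm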
VGGBase
import torch import torchvision import torch.utils.data from torch import nn import torch.nn.functional as F from itertools import product as product import torch.optim def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. """ def __init__(self): super(VGGBase, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def forward(self, image): """ Forward propagation. :param image: images, a tensor of dimensions (N, 3, 300, 300) :return: lower-level feature maps conv4_3 and conv7 """ out = F.relu(self.conv1_1(image)) out = F.relu(self.conv1_2(out)) out = self.pool1(out) out = F.relu(self.conv2_1(out)) out = F.relu(self.conv2_2(out)) out = self.pool2(out) out = F.relu(self.conv3_1(out)) out = F.relu(self.conv3_2(out)) out = F.relu(self.conv3_3(out)) out = self.pool3(out) out = F.relu(self.conv4_1(out)) out = F.relu(self.conv4_2(out)) out = F.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = F.relu(self.conv5_1(out)) out = F.relu(self.conv5_2(out)) out = F.relu(self.conv5_3(out)) out = self.pool5(out) out = F.relu(self.conv6(out)) conv7_feats = F.relu(self.conv7(out)) return conv4_3_feats, conv7_feats def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16 We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torchvision import torch.utils.data from torch import nn from itertools import product as product import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) 
tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_17(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_18(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 4 x3 = xindex // 4 y4 = yindex x5 = xindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (8 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1, 1], 1, tl.int8) tmp9 = tl.full([1, 1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1, 1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1, 1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (y0 + 512 * x5 + 8192 * y1), tmp6, xmask) tl.store(out_ptr1 + (y0 + 512 * x5 + 8192 * y1), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 2048 % 4 x1 = xindex // 512 % 4 x6 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-2560 + x6), tmp10, other=float('-inf')) tmp12 = x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-2048 + x6), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-1536 + x6), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x2 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-512 + x6), tmp30, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x6, tmp33, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x2 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (1536 + x6), tmp43, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (2048 + x6), tmp46, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (2560 + x6), tmp49, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, 
tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x6, tmp51, None) tl.store(out_ptr1 + x6, tmp76, None) @triton.jit def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 1024 y1 = yindex // 1024 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 1024 * x2 + 16384 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 1024 * x2 + 16384 * y1), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) 
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (1024,), (1,)) assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_31, (1024,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_9[grid(524288, 9)](primals_28, buf14, 
524288, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_28 buf15 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_10[grid(1048576)](buf16, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_10[grid(1048576)](buf18, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_11[grid(262144)](buf18, buf19, buf20, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_12[grid(524288)](buf22, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_12[grid(524288)](buf24, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_13[grid(131072)](buf24, buf25, buf26, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf28 = buf27 del buf27 triton_poi_fused_convolution_relu_14[grid(262144)](buf28, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf30 = buf29 del buf29 triton_poi_fused_convolution_relu_14[grid(262144)](buf30, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf32 = buf31 del buf31 triton_poi_fused_convolution_relu_14[grid(262144)](buf32, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf34 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_15[grid(65536)](buf32, buf33, 
buf34, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf36 = buf35 del buf35 triton_poi_fused_convolution_relu_16[grid(131072)](buf36, primals_17, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf38 = buf37 del buf37 triton_poi_fused_convolution_relu_16[grid(131072)](buf38, primals_19, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_convolution_relu_17[grid(2048, 64)](buf39, primals_21, buf40, 2048, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf39 del primals_21 buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) buf42 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_18[grid(2048, 16)](buf40, buf41, buf42, 2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf44 = buf43 del buf43 triton_poi_fused_convolution_relu_19[grid(32768)](buf44, primals_23, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_23 buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf46 = buf45 del buf45 triton_poi_fused_convolution_relu_19[grid(32768)](buf46, primals_25, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_25 buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf48 = buf47 del buf47 triton_poi_fused_convolution_relu_19[grid(32768)](buf48, primals_27, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_27 buf49 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) buf50 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_20[grid(32768)](buf48, buf49, buf50, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf52 = buf51 del buf51 triton_poi_fused_convolution_relu_21[grid(65536)](buf52, primals_29, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_29 buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf53, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf54 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32) buf55 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_22[grid(4096, 16) ](buf53, primals_31, buf54, buf55, 4096, 16, XBLOCK=16, YBLOCK= 64, num_warps=4, num_stages=1) del buf53 del primals_31 return (buf40, buf54, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf33, buf34, buf36, buf38, buf40, buf41, buf42, buf44, buf46, buf48, buf49, buf50, buf52, buf55) def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor class VGGBaseNew(nn.Module): """ VGG base convolutions to produce lower-level feature maps. """ def __init__(self): super(VGGBaseNew, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16 We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_28 = self.conv6.weight primals_29 = self.conv6.bias primals_30 = self.conv7.weight primals_31 = self.conv7.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return output[0], output[1]
adityag6994/pytorch_ssd_training
VGGBase
false
3987
[ "MIT" ]
0
404f3cbef815e314337ec2c1b4f06a2403a7ce03
https://github.com/adityag6994/pytorch_ssd_training/tree/404f3cbef815e314337ec2c1b4f06a2403a7ce03
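A minimal sketch, not taken from the repository above, of what `decimate` does during the fc6-to-conv6 conversion described in `load_pretrained_layers`; random tensors stand in for the pretrained VGG-16 weights:

import torch

def decimate(tensor, m):
    # Keep every m[d]-th slice along each dimension d; None leaves d untouched.
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d, index=torch.arange(
                start=0, end=tensor.size(d), step=m[d]).long())
    return tensor

# fc6 viewed as a 7x7 convolution with 4096 filters over 512 channels;
# decimating by [4, None, 3, 3] yields the smaller conv6 kernel.
conv_fc6_weight = torch.randn(4096, 512, 7, 7)
conv6_weight = decimate(conv_fc6_weight, m=[4, None, 3, 3])
print(conv6_weight.shape)  # torch.Size([1024, 512, 3, 3])

arange(0, 7, 3) keeps indices 0, 3 and 6, which is how each 7x7 spatial kernel shrinks to 3x3 while the 4096 filters drop to 1024.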
sSE
import torch import torch.nn as nn class sSE(nn.Module): def __init__(self, in_channels): super().__init__() self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, input_tensor): x = self.pointwise(input_tensor) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(256)](primals_3, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1 class sSENew(nn.Module): def __init__(self, in_channels): super().__init__() self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.pointwise.weight primals_2 = self.pointwise.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mattroz/yatopi
sSE
false
3988
[ "MIT" ]
0
278bac6f3d2f13916ae9d43309b9f38b608426bd
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
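The fused `triton_poi_fused_mul_sigmoid_1` kernel above reads the gate at offset `x0 + 16 * x2`, i.e. once per spatial position with no channel term, which is the broadcast of a (N, 1, H, W) map over all channels. An eager-mode sketch of the same computation, using the toy shapes from `get_inputs` (not part of the dataset row):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
pointwise = nn.Conv2d(in_channels=4, out_channels=1, kernel_size=1)

gate = torch.sigmoid(pointwise(x))  # (4, 1, 4, 4): one value per pixel
out = x * gate                      # broadcast over the channel dimension
assert out.shape == x.shape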
_FakeMegatronMLP
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class _FakeMegatronMLP(nn.Module): """ A fake mlp without model parallelism for correctness testing """ def __init__(self, args, _): super().__init__() self.fc1 = nn.Linear(args.hidden_size, args.hidden_hidden_size) self.fc2 = nn.Linear(args.hidden_hidden_size, args.hidden_size) def forward(self, x): """ Directly use GeLU """ x = self.fc1(x) x = F.gelu(x) x = self.fc2(x) return x, torch.zeros_like(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'args': _mock_config(hidden_size=4, hidden_hidden_size=4), '_': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_zeros_like_1[grid(256)](buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 class _FakeMegatronMLPNew(nn.Module): """ A fake mlp without model parallelism for correctness testing """ def __init__(self, args, _): super().__init__() self.fc1 = nn.Linear(args.hidden_size, args.hidden_hidden_size) self.fc2 = nn.Linear(args.hidden_hidden_size, args.hidden_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
liuhatry/fastmoe
_FakeMegatronMLP
false
3989
[ "Apache-2.0" ]
0
a676bf1eae874c208a0e669bf0f79e6fb3b43623
https://github.com/liuhatry/fastmoe/tree/a676bf1eae874c208a0e669bf0f79e6fb3b43623
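`triton_poi_fused_gelu_0` hard-codes the exact (erf-based) GELU, with the constant 0.7071067811865476 being 1/sqrt(2). A quick parity check against `F.gelu` — a sketch for illustration, not from the repository:

import torch
import torch.nn.functional as F

x = torch.randn(1000)
# 0.5 * x * (1 + erf(x / sqrt(2))): exactly the kernel's tmp2 * tmp7
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
assert torch.allclose(manual, F.gelu(x), atol=1e-6)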
cSE
import torch import torch.nn as nn class cSE(nn.Module): def __init__(self, in_channels): super().__init__() reduced_filters = 1 if in_channels // 2 == 0 else in_channels // 2 self.global_avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.pointwise_1 = nn.Conv2d(in_channels=in_channels, out_channels= reduced_filters, kernel_size=1) self.pointwise_2 = nn.Conv2d(in_channels=reduced_filters, out_channels=in_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU6() def forward(self, input_tensor): x = self.global_avg_pool(input_tensor) x = self.pointwise_1(x) x = self.relu(x) x = self.pointwise_2(x) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_hardtanh_hardtanh_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 1, 1), (2, 1, 1, 1)) buf3 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 1, 1), torch.float32) 
buf7 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 1, 1), torch.bool) triton_poi_fused_convolution_hardtanh_hardtanh_backward_1[grid(8)](buf2 , primals_3, buf3, buf7, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf2 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5, buf7 class cSENew(nn.Module): def __init__(self, in_channels): super().__init__() reduced_filters = 1 if in_channels // 2 == 0 else in_channels // 2 self.global_avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.pointwise_1 = nn.Conv2d(in_channels=in_channels, out_channels= reduced_filters, kernel_size=1) self.pointwise_2 = nn.Conv2d(in_channels=reduced_filters, out_channels=in_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU6() def forward(self, input_0): primals_2 = self.pointwise_1.weight primals_3 = self.pointwise_1.bias primals_4 = self.pointwise_2.weight primals_5 = self.pointwise_2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mattroz/yatopi
cSE
false
3,990
[ "MIT" ]
0
278bac6f3d2f13916ae9d43309b9f38b608426bd
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
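`triton_poi_fused_convolution_hardtanh_hardtanh_backward_2` writes two outputs: the ReLU6 forward value and a boolean saturation mask that autograd later uses to zero gradients. A hedged eager sketch of both quantities (hypothetical variable names):

import torch
import torch.nn.functional as F

y = (torch.randn(8) * 10.0).requires_grad_()  # stand-in pre-activation
out = F.relu6(y)                              # forward value, like buf3
out.sum().backward()
saturated = (y <= 0.0) | (y >= 6.0)           # the mask stored in buf7
# the gradient is zero exactly where the kernel's mask is True
assert torch.equal(y.grad == 0.0, saturated)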
AlphaMish
import torch class AlphaMish(torch.nn.Module): def __init__(self, in_features): super().__init__() self.alpha = torch.nn.Parameter(torch.zeros((in_features, 1, 1))) self.alpha.requires_grad = True def forward(self, x): return torch.mul(x, torch.tanh(torch.mul(1 + torch.nn.functional. softplus(self.alpha), torch.nn.functional.softplus(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_softplus_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = 20.0 tmp3 = tmp1 > tmp2 tmp4 = tl_math.exp(tmp1) tmp5 = libdevice.log1p(tmp4) tmp6 = tl.where(tmp3, tmp1, tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp0 > tmp2 tmp10 = tl_math.exp(tmp0) tmp11 = libdevice.log1p(tmp10) tmp12 = tl.where(tmp9, tmp0, tmp11) tmp13 = tmp8 * tmp12 tmp14 = libdevice.tanh(tmp13) tmp15 = tmp0 * tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_softplus_tanh_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2 class AlphaMishNew(torch.nn.Module): def __init__(self, in_features): super().__init__() self.alpha = torch.nn.Parameter(torch.zeros((in_features, 1, 1))) self.alpha.requires_grad = True def forward(self, input_0): primals_1 = self.alpha primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
mattroz/yatopi
AlphaMish
false
3991
[ "MIT" ]
0
278bac6f3d2f13916ae9d43309b9f38b608426bd
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
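The kernel's `tmp2 = 20.0` branch is the standard numerically stable softplus: above the threshold, exp(x) would overflow and log1p(exp(x)) is indistinguishable from x, so the input is passed through. A sketch of the equivalence with `F.softplus`:

import torch
import torch.nn.functional as F

x = torch.tensor([-50.0, 0.0, 19.0, 25.0, 100.0])
# where(x > 20, x, log1p(exp(x))), as in the fused kernel; exp(100.0)
# overflows to inf, but that branch is discarded by the where select.
manual = torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))
assert torch.allclose(manual, F.softplus(x, beta=1.0, threshold=20.0))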
SimpleErfModule
import torch import torch.jit import torch.onnx import torch.nn class SimpleErfModule(torch.nn.Module): def forward(self, input): return torch.special.erf(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_erf_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.erf(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_erf_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleErfModuleNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
SimpleErfModule
false
3992
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
SimpleLeakyReluModule
import torch import torch.jit import torch.onnx import torch.nn class SimpleLeakyReluModule(torch.nn.Module): def __init__(self, negative_slope=0.01, inplace=False): super(SimpleLeakyReluModule, self).__init__() self.negative_slope = negative_slope self.inplace = inplace def forward(self, a): return torch.nn.functional.leaky_relu(a, negative_slope=self. negative_slope, inplace=self.inplace) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleLeakyReluModuleNew(torch.nn.Module): def __init__(self, negative_slope=0.01, inplace=False): super(SimpleLeakyReluModuleNew, self).__init__() self.negative_slope = negative_slope self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
SimpleLeakyReluModule
false
3993
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
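The generated kernel reduces leaky ReLU to a single select, `where(x > 0, x, 0.01 * x)`. A two-line parity sketch (illustrative only):

import torch
import torch.nn.functional as F

x = torch.randn(256)
manual = torch.where(x > 0.0, x, 0.01 * x)  # the kernel's tmp5
assert torch.allclose(manual, F.leaky_relu(x, negative_slope=0.01))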
SimpleArgSortModule
import torch import torch.jit import torch.onnx import torch.nn class SimpleArgSortModule(torch.nn.Module): def __init__(self, descending=True): super(SimpleArgSortModule, self).__init__() self.descending = descending def forward(self, inputs): return torch.argsort(inputs, dim=-1, descending=self.descending) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = r1 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=True) tmp7 = tmp6.to(tl.int64) tl.store(out_ptr1 + (r1 + 4 * x0), tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg0_1, buf2, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return buf2, class SimpleArgSortModuleNew(torch.nn.Module): def __init__(self, descending=True): super(SimpleArgSortModuleNew, self).__init__() self.descending = descending def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
SimpleArgSortModule
false
3994
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
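`triton_per_fused_sort_0` launches 64 independent length-4 row sorts (xnumel=64, RBLOCK=4), keeps only the int16 indices, and widens them to int64 to match what `torch.argsort` returns. A sketch of the same contract in eager mode:

import torch

x = torch.rand(4, 4, 4, 4)
idx = torch.argsort(x, dim=-1, descending=True)
assert idx.dtype == torch.int64
# gathering with the indices yields rows sorted in descending order
s = torch.gather(x, -1, idx)
assert (s[..., :-1] >= s[..., 1:]).all()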
CriticNN
import torch import torch.optim as optim from torch import nn from torch.nn import functional as F class CriticNN(nn.Module): def __init__(self, in_channels=3): super(CriticNN, self).__init__() self.fc1 = nn.Linear(4, 64) self.fc2 = nn.Linear(64, 1) self.optimizer = optim.Adam(self.parameters(), lr=0.0001) None def forward(self, x): x = F.layer_norm(x, x.size()) x = F.leaky_relu(self.fc1(x)) x = F.layer_norm(x, x.size()) x = self.fc2(x) return x def init_weights(self, m): if type(m) == nn.Linear: None m.weight.data.fill_(0) m.bias.data.fill_(0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.optim as optim from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_layer_norm_0(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp20, None) @triton.jit def triton_red_fused_leaky_relu_native_layer_norm_native_layer_norm_backward_1( in_ptr0, in_ptr1, out_ptr0, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] tmp9_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp9_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp9_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + r0, rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp9_mean_next, tmp9_m2_next, tmp9_weight_next = (triton_helpers. 
welford_reduce(tmp8, tmp9_mean, tmp9_m2, tmp9_weight, roffset == 0) ) tmp9_mean = tl.where(rmask, tmp9_mean_next, tmp9_mean) tmp9_m2 = tl.where(rmask, tmp9_m2_next, tmp9_m2) tmp9_weight = tl.where(rmask, tmp9_weight_next, tmp9_weight) tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp4, rmask) tmp9_tmp, tmp10_tmp, tmp11_tmp = triton_helpers.welford(tmp9_mean, tmp9_m2, tmp9_weight, 1) tmp9 = tmp9_tmp[:, None] tmp10 = tmp10_tmp[:, None] tmp11_tmp[:, None] for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r0 = rindex % 64 tmp12 = tl.load(out_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0).to(tl.int1) tmp13 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr1 + r0, rmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = 0.01 tmp17 = tmp15 * tmp16 tmp18 = tl.where(tmp12, tmp15, tmp17) tmp19 = tmp18 - tmp9 tmp20 = 4096.0 tmp21 = tmp10 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tl.store(out_ptr3 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp25, rmask ) tmp26 = 4096.0 tmp27 = tmp10 / tmp26 tmp28 = 1e-05 tmp29 = tmp27 + tmp28 tmp30 = libdevice.rsqrt(tmp29) tmp31 = 0.000244140625 tmp32 = tmp30 * tmp31 tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64, 4), (4, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (1, 64), (64, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_native_layer_norm_0[grid(1)](primals_1, buf3, 1, 256, num_warps=2, num_stages=1) del primals_1 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), out=buf4) del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch. float32) buf12 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_red_fused_leaky_relu_native_layer_norm_native_layer_norm_backward_1[ grid(1)](buf4, primals_3, buf5, buf9, buf12, 1, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del buf4 del primals_3 buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf9, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf11) del primals_5 return reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), buf5, buf9, primals_4, buf12 class CriticNNNew(nn.Module): def __init__(self, in_channels=3): super(CriticNNNew, self).__init__() self.fc1 = nn.Linear(4, 64) self.fc2 = nn.Linear(64, 1) self.optimizer = optim.Adam(self.parameters(), lr=0.0001) None def init_weights(self, m): if type(m) == nn.Linear: None m.weight.data.fill_(0) m.bias.data.fill_(0) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
maxmax1992/Q_learning
CriticNN
false
3995
[ "MIT" ]
0
8b2b8491d6f94b94b2fce608b93cdc31b418c5b0
https://github.com/maxmax1992/Q_learning/tree/8b2b8491d6f94b94b2fce608b93cdc31b418c5b0
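The second kernel streams 4096 elements (the 4x4x4x64 output of fc1, normalized over `x.size()` as a single group) through `triton_helpers.welford_reduce`, a single-pass mean/variance reduction. A plain-Python sketch of Welford's update rule — a hypothetical helper, not Inductor's actual implementation:

import torch

def welford(values):
    # Running count, mean, and sum of squared deviations (M2) per update.
    count, mean, m2 = 0, 0.0, 0.0
    for v in values:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    return mean, m2 / count  # biased variance, as layer norm uses

t = torch.randn(4096, dtype=torch.float64)
mean, var = welford(t.tolist())
assert abs(mean - t.mean().item()) < 1e-9
assert abs(var - t.var(unbiased=False).item()) < 1e-9

The single-pass form matters here because the kernel cannot afford to re-read all 4096 activations just to get a mean before computing the variance.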
scSE
import torch import torch.nn as nn class cSE(nn.Module): def __init__(self, in_channels): super().__init__() reduced_filters = 1 if in_channels // 2 == 0 else in_channels // 2 self.global_avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.pointwise_1 = nn.Conv2d(in_channels=in_channels, out_channels= reduced_filters, kernel_size=1) self.pointwise_2 = nn.Conv2d(in_channels=reduced_filters, out_channels=in_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU6() def forward(self, input_tensor): x = self.global_avg_pool(input_tensor) x = self.pointwise_1(x) x = self.relu(x) x = self.pointwise_2(x) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x class sSE(nn.Module): def __init__(self, in_channels): super().__init__() self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, input_tensor): x = self.pointwise(input_tensor) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x class scSE(nn.Module): def __init__(self, in_channels): super().__init__() self.sSE = sSE(in_channels) self.cSE = cSE(in_channels) def forward(self, input_tensor): spatial_att_map = self.sSE(input_tensor) channel_att_map = self.cSE(input_tensor) result = torch.add(spatial_att_map, channel_att_map) return result def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_hardtanh_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp2 <= tmp3 tmp8 = tmp2 >= tmp5 tmp9 = tmp7 | tmp8 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x4 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp0 * tmp5 tmp7 = tmp3 + tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, 
(2,), (1,)) assert_size_stride(primals_6, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf2 triton_per_fused_mean_1[grid(16)](buf3, primals_3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1)) buf5 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 1, 1), torch.float32) buf9 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 1, 1), torch.bool) triton_poi_fused_convolution_hardtanh_hardtanh_backward_2[grid(8)](buf4 , primals_5, buf5, buf9, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf4 del primals_5 buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 1, 1), (4, 1, 1, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_3[grid(16)](buf7, primals_7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_4[grid(256)](primals_3, buf1, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf8, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, buf7, buf9) class cSE(nn.Module): def __init__(self, in_channels): super().__init__() reduced_filters = 1 if in_channels // 2 == 0 else in_channels // 2 self.global_avg_pool = nn.AdaptiveAvgPool2d(output_size=(1, 1)) self.pointwise_1 = nn.Conv2d(in_channels=in_channels, out_channels= reduced_filters, kernel_size=1) self.pointwise_2 = nn.Conv2d(in_channels=reduced_filters, out_channels=in_channels, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU6() def forward(self, input_tensor): x = self.global_avg_pool(input_tensor) x = self.pointwise_1(x) x = self.relu(x) x = self.pointwise_2(x) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x class sSE(nn.Module): def __init__(self, in_channels): super().__init__() self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, input_tensor): x = self.pointwise(input_tensor) x = self.sigmoid(x) x = torch.mul(input_tensor, x) return x class scSENew(nn.Module): def __init__(self, in_channels): super().__init__() self.sSE = sSE(in_channels) self.cSE = cSE(in_channels) def forward(self, input_0): primals_1 = self.sSE.pointwise.weight primals_2 = self.sSE.pointwise.bias primals_4 = self.cSE.pointwise_1.weight primals_5 = self.cSE.pointwise_1.bias primals_6 = self.cSE.pointwise_2.weight primals_7 = self.cSE.pointwise_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mattroz/yatopi
scSE
false
3996
[ "MIT" ]
0
278bac6f3d2f13916ae9d43309b9f38b608426bd
https://github.com/mattroz/yatopi/tree/278bac6f3d2f13916ae9d43309b9f38b608426bd
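`triton_poi_fused_add_mul_sigmoid_4` folds both attention branches into one elementwise pass: it applies sigmoid to the spatial map and the channel map separately, multiplies each by the input, and adds. An eager sketch with the toy sizes (in_channels=4, so the squeeze width is 2); the layers are freshly constructed stand-ins:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
sse = nn.Conv2d(4, 1, kernel_size=1)    # spatial gate
cse_1 = nn.Conv2d(4, 2, kernel_size=1)  # channel squeeze
cse_2 = nn.Conv2d(2, 4, kernel_size=1)  # channel excite

spatial = torch.sigmoid(sse(x))                                # (4, 1, 4, 4)
pooled = x.mean(dim=(2, 3), keepdim=True)                      # global avg pool
channel = torch.sigmoid(cse_2(cse_1(pooled).clamp(0.0, 6.0)))  # ReLU6 inside
out = x * spatial + x * channel                                # the fused kernel
assert out.shape == x.shape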
SimpleAvgPool2dModule
import torch import torch.jit import torch.nn.functional as F import torch.onnx import torch.nn class SimpleAvgPool2dModule(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super(SimpleAvgPool2dModule, self).__init__() self.kernel_size = kernel_size self.padding = padding self.stride = stride def forward(self, inputs): return F.avg_pool2d(inputs + inputs, self.kernel_size, padding=self .padding, stride=self.stride) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp41 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp44 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp5 tmp7 = tmp6 + tmp4 tmp9 = tmp8 + tmp8 tmp10 = tmp9 + tmp7 tmp12 = tmp11 + tmp11 tmp13 = tmp12 + tmp10 tmp15 = tmp14 + tmp14 tmp16 = tmp15 + tmp13 tmp18 = tmp17 + tmp17 tmp19 = tmp18 + tmp16 tmp21 = tmp20 + tmp20 tmp22 = tmp21 + tmp19 tmp24 = tmp23 + tmp23 tmp25 = tmp24 + tmp22 tmp27 = tmp26 + tmp26 tmp28 = tmp27 + tmp25 tmp30 = tmp29 + tmp29 tmp31 = tmp30 + tmp28 tmp33 = tmp32 + tmp32 tmp34 = tmp33 + tmp31 tmp36 = tmp35 + tmp35 tmp37 = tmp36 + tmp34 tmp39 = tmp38 + tmp38 tmp40 = tmp39 + tmp37 tmp42 = tmp41 + tmp41 tmp43 = tmp42 + tmp40 tmp45 = tmp44 + tmp44 tmp46 = tmp45 + tmp43 tmp47 = 0.0625 tmp48 = tmp46 * tmp47 tl.store(out_ptr0 + x0, tmp48, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class SimpleAvgPool2dModuleNew(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super(SimpleAvgPool2dModuleNew, self).__init__() self.kernel_size = kernel_size self.padding = padding self.stride = stride def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
SimpleAvgPool2dModule
false
3997
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
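With kernel_size=4 on a 4x4 map there is exactly one window per channel, so the kernel unrolls all 16 loads, doubles each value (the `inputs + inputs`), and scales the sum by 0.0625 = 1/16. The same arithmetic in eager mode, as a sketch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
eager = F.avg_pool2d(x + x, kernel_size=4)              # (4, 4, 1, 1)
manual = (x + x).sum(dim=(2, 3), keepdim=True) * 0.0625
assert torch.allclose(eager, manual, atol=1e-6)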
SimpleFmodModule
import torch import torch.jit import torch.onnx import torch.nn class SimpleFmodModule(torch.nn.Module): def __init__(self): super(SimpleFmodModule, self).__init__() def forward(self, a, b): if b.size() == torch.Size([]): c = a.fmod(b.item()) else: c = a.fmod(b) return c.fmod(torch.tensor(1.0, dtype=c.dtype)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_fmod_lift_fresh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.fmod(tmp0, tmp1) tmp3 = 1.0 tmp4 = libdevice.fmod(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_fmod_lift_fresh_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleFmodModuleNew(torch.nn.Module): def __init__(self): super(SimpleFmodModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mciprian13/glow
SimpleFmodModule
false
3998
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
UnaryMaxModule
import torch import torch.jit import torch.onnx import torch.nn class UnaryMaxModule(torch.nn.Module): def __init__(self): super(UnaryMaxModule, self).__init__() def forward(self, a): return torch.max(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_max_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 + tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp2, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp4, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_max_0[grid(1)](arg0_1, buf0, 1, 256, num_warps =2, num_stages=1) del arg0_1 return buf0, class UnaryMaxModuleNew(torch.nn.Module): def __init__(self): super(UnaryMaxModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
UnaryMaxModule
false
3999
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
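All 256 elements fit a single program (RBLOCK=256), so the whole reduction is one `max2` over the flattened input, producing the 0-dim output buffer. A sketch of the contract (the mirror-image `UnaryMinModule` below differs only in using `min2`):

import torch

x = torch.rand(4, 4, 4, 4)
out = torch.max(x + x)
assert out.dim() == 0  # scalar tensor, matching buf0's empty shape ()
assert torch.equal(out, (x + x).reshape(-1).max())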
UnaryMinModule
import torch import torch.jit import torch.onnx import torch.nn class UnaryMinModule(torch.nn.Module): def __init__(self): super(UnaryMinModule, self).__init__() def forward(self, a): return torch.min(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_min_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 + tmp0 tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(triton_helpers.min2(tmp2, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp4, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_min_0[grid(1)](arg0_1, buf0, 1, 256, num_warps =2, num_stages=1) del arg0_1 return buf0, class UnaryMinModuleNew(torch.nn.Module): def __init__(self): super(UnaryMinModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mciprian13/glow
UnaryMinModule
false
4000
[ "Apache-2.0" ]
0
90f88205d9bf8baff8df5bbda51c9d138e3e668b
https://github.com/mciprian13/glow/tree/90f88205d9bf8baff8df5bbda51c9d138e3e668b
SqueezeEmbedding
import torch import torch.nn as nn class SqueezeEmbedding(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbedding, self).__init__() self.batch_first = batch_first def forward(self, x, x_len): """ sequence -> sort -> pad and pack -> unpack ->unsort :param x: sequence embedding vectors :param x_len: numpy/tensor list :return: """ """sort""" x_sort_idx = torch.sort(-x_len)[1].long() x_unsort_idx = torch.sort(x_sort_idx)[1].long() x_len = x_len[x_sort_idx] x = x[x_sort_idx] """pack""" x_emb_p = torch.nn.utils.rnn.pack_padded_sequence(x, x_len.cpu(), batch_first=self.batch_first) """unpack: out""" out = torch.nn.utils.rnn.pad_packed_sequence(x_emb_p, batch_first= self.batch_first) out = out[0] """unsort""" out = out[x_unsort_idx] return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_index_neg_sort_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = -tmp0 tmp2 = r0 tmp3 = tmp2.to(tl.int16) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) _tmp6, tmp7 = triton_helpers.sort_with_index(tmp4, tmp5, None, 1, stable=False, descending=False) tmp8 = tmp7.to(tl.int64) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) _tmp10, tmp11 = triton_helpers.sort_with_index(tmp9, tmp5, None, 1, stable=False, descending=False) tmp12 = tmp11.to(tl.int64) tmp13 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp14 = tmp8 + tmp13 tmp15 = tmp8 < 0 tmp16 = tl.where(tmp15, tmp14, tmp8) tl.device_assert((0 <= tmp16) & (tmp16 < 4), 'index out of bounds: 0 <= tmp16 < 4') tmp18 = tl.load(in_ptr0 + tmp16, None, eviction_policy='evict_last') tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (x0 + 64 * tmp5), xmask) tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.int16) buf4 = empty_strided_cuda((4,), (1,), torch.int64) buf6 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_per_fused_index_neg_sort_0[grid(1)](arg0_1, buf1, buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_index_1[grid(256)](buf1, arg1_1, buf5, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg1_1 del buf1 buf7 = empty_strided_cpu((4,), (1,), torch.int64) buf7.copy_(buf6) return buf5, buf7, buf4 class SqueezeEmbeddingNew(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbeddingNew, self).__init__() self.batch_first = batch_first def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
minionssso/PyABSA
SqueezeEmbedding
false
4001
[ "MIT" ]
0
fd9a9a6fd55552a60329fd04b6830e1bb144d50f
https://github.com/minionssso/PyABSA/tree/fd9a9a6fd55552a60329fd04b6830e1bb144d50f
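The sort/unsort idiom in the original forward (needed because older `pack_padded_sequence` required lengths in descending order) relies on `torch.sort(sort_idx)[1]` being the inverse permutation of `sort_idx`. A small sketch of why the round trip restores batch order, with toy lengths not taken from the dataset:

import torch

lengths = torch.tensor([2, 4, 1, 3])
sort_idx = torch.sort(-lengths)[1]    # descending-length order: [1, 3, 0, 2]
unsort_idx = torch.sort(sort_idx)[1]  # inverse permutation: [2, 0, 3, 1]
assert torch.equal(lengths[sort_idx][unsort_idx], lengths)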
BiDAFAttention
import torch import torch.nn as nn import torch.nn.functional as F def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class BiDAFAttention(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super(BiDAFAttention, self).__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, c, q, c_mask, q_mask): batch_size, c_len, _ = c.size() q_len = q.size(1) s = self.get_similarity_matrix(c, q) c_mask = c_mask.view(batch_size, c_len, 1) q_mask = q_mask.view(batch_size, 1, q_len) s1 = masked_softmax(s, q_mask, dim=2) s2 = masked_softmax(s, c_mask, dim=1) a = torch.bmm(s1, q) b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c) x = torch.cat([c, a, c * a, c * b], dim=2) return x def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr3 + x4, xmask) tmp6 = tl.load(in_ptr4 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp0 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 * tmp8 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) 
assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf3) buf4 = buf2 del buf2 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_1[grid(64)](primals_8, buf0, buf1, buf3, primals_6, primals_7, buf4, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del primals_6 buf5 = buf3 del buf3 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf5 del buf5 triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_5[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf8 del buf8 extern_kernels.bmm(buf6, primals_2, out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf11, primals_1, out=buf12) del buf11 buf13 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_6[grid(256)](primals_1, buf10, buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf10 del buf12 return buf13, primals_1, primals_2, primals_7, primals_8, buf6, buf9 def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class BiDAFAttentionNew(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. 
The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super(BiDAFAttentionNew, self).__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.c_weight primals_4 = self.q_weight primals_5 = self.cq_weight primals_6 = self.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
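The inductor output above keeps the module interface, so it can be compared against the eager reference. Below is a sketch of such a parity check (assumed, not from the source): it needs a CUDA device, since call() pins its buffers to cuda:0, and it assumes the kernels correspond to eval-mode tracing (no dropout kernels appear in call()), so eval() on the reference makes the two paths comparable.

import torch

if torch.cuda.is_available():
    ref = BiDAFAttention(hidden_size=4).cuda().eval()
    opt = BiDAFAttentionNew(hidden_size=4).cuda().eval()
    opt.load_state_dict(ref.state_dict())      # share one set of random weights
    inputs = [t.cuda() for t in get_inputs()]  # get_inputs() from this record
    torch.testing.assert_close(opt(*inputs), ref(*inputs))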
mayankiitg/cs224n
BiDAFAttention
false
4,002
[ "MIT" ]
0
c67b7904101c8f19a5a231e4fe521e764470d41b
https://github.com/mayankiitg/cs224n/tree/c67b7904101c8f19a5a231e4fe521e764470d41b
Norm
import torch import torch.nn as nn class Norm(nn.Module): def __init__(self, dim_seq, input_size, eps=1e-06): super().__init__() self.size = input_size self.seq = dim_seq self.alpha = nn.Parameter(torch.ones((self.size, self.seq))) self.bias = nn.Parameter(torch.zeros((self.size, self.seq))) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim =-1, keepdim=True) + self.eps) + self.bias return norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_seq': 4, 'input_size': 4}]
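Two details of this Norm are easy to miss: torch.std defaults to Bessel's correction (an n - 1 divisor), and eps is added to the std rather than to the variance, so it is not numerically identical to nn.LayerNorm. A small usage sketch (assumed values, matching get_inputs()):

import torch

norm = Norm(dim_seq=4, input_size=4)   # alpha starts at 1, bias at 0
x = torch.rand(4, 4, 4, 4)             # last dim must equal dim_seq
y = norm(x)
assert y.shape == x.shape
# With the initial parameters each position is standardised, so the mean
# over the normalised dimension is ~0.
torch.testing.assert_close(y.mean(-1), torch.zeros(4, 4, 4), atol=1e-05, rtol=0)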
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask) tmp2 = tl.load(in_ptr1 + 4 * x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x5), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class NormNew(nn.Module): def __init__(self, dim_seq, input_size, eps=1e-06): super().__init__() self.size = input_size self.seq = dim_seq self.alpha = nn.Parameter(torch.ones((self.size, self.seq))) self.bias = nn.Parameter(torch.zeros((self.size, self.seq))) self.eps = eps def forward(self, input_0): primals_1 = self.alpha primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mingweima/hintplaygame
Norm
false
4,003
[ "MIT" ]
0
31f35a22111a2e5e7e5d8e90f92326bc784c5fe7
https://github.com/mingweima/hintplaygame/tree/31f35a22111a2e5e7e5d8e90f92326bc784c5fe7
LinearExcitability
import math import torch from torch import nn from torch.nn.parameter import Parameter def linearExcitability(input, weight, excitability=None, bias=None): """Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`. Shape: - input: :math:`(N, *, in_features)` - weight: :math:`(out_features, in_features)` - excitability: :math:`(out_features)` - bias: :math:`(out_features)` - output: :math:`(N, *, out_features)` (NOTE: `*` means any number of additional dimensions)""" if excitability is not None: output = input.matmul(weight.t()) * excitability else: output = input.matmul(weight.t()) if bias is not None: output += bias return output class LinearExcitability(nn.Module): """Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer. Args: in_features: size of each input sample out_features: size of each output sample bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True) excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False) excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False) Shape: - input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions - output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape (out_features x in_features) excitability: the learnable multiplication terms (out_features) bias: the learnable bias of the module of shape (out_features) excit_buffer: fixed multiplication variable (out_features)""" def __init__(self, in_features, out_features, bias=True, excitability= False, excit_buffer=False): super(LinearExcitability, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features)) if excitability: self.excitability = Parameter(torch.Tensor(out_features)) else: self.register_parameter('excitability', None) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if excit_buffer: buffer = torch.Tensor(out_features).uniform_(1, 1) self.register_buffer('excit_buffer', buffer) else: self.register_buffer('excit_buffer', None) self.reset_parameters() def reset_parameters(self): """Modifies the parameters "in-place" to initialize / reset them at appropriate values.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.excitability is not None: self.excitability.data.uniform_(1, 1) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input): """Running this model's forward step requires/returns: -[input]: [batch_size]x[...]x[in_features] -[output]: [batch_size]x[...]x[hidden_features]""" if self.excit_buffer is None: excitability = self.excitability elif self.excitability is None: excitability = self.excit_buffer else: excitability = self.excitability * self.excit_buffer return linearExcitability(input, self.weight, excitability, self.bias) def __repr__(self): return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
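With the default flags (bias=True, excitability=False, excit_buffer=False) both excitability terms are None, so the module reduces to a plain affine layer. A sketch (assumed, not from the repo) making that equivalence explicit:

import torch
import torch.nn.functional as F

layer = LinearExcitability(in_features=4, out_features=4)
x = torch.rand(4, 4, 4, 4)
# excitability and excit_buffer are both None, so forward() is x @ W.T + b.
torch.testing.assert_close(layer(x), F.linear(x, layer.weight, layer.bias))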
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_2, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_2 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def linearExcitability(input, weight, excitability=None, bias=None): """Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`. Shape: - input: :math:`(N, *, in_features)` - weight: :math:`(out_features, in_features)` - excitability: :math:`(out_features)` - bias: :math:`(out_features)` - output: :math:`(N, *, out_features)` (NOTE: `*` means any number of additional dimensions)""" if excitability is not None: output = input.matmul(weight.t()) * excitability else: output = input.matmul(weight.t()) if bias is not None: output += bias return output class LinearExcitabilityNew(nn.Module): """Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer. Args: in_features: size of each input sample out_features: size of each output sample bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True) excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False) excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False) Shape: - input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions - output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input. 
Attributes: weight: the learnable weights of the module of shape (out_features x in_features) excitability: the learnable multiplication terms (out_features) bias: the learnable bias of the module of shape (out_features) excit_buffer: fixed multiplication variable (out_features)""" def __init__(self, in_features, out_features, bias=True, excitability= False, excit_buffer=False): super(LinearExcitabilityNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features)) if excitability: self.excitability = Parameter(torch.Tensor(out_features)) else: self.register_parameter('excitability', None) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) if excit_buffer: buffer = torch.Tensor(out_features).uniform_(1, 1) self.register_buffer('excit_buffer', buffer) else: self.register_buffer('excit_buffer', None) self.reset_parameters() def reset_parameters(self): """Modifies the parameters "in-place" to initialize / reset them at appropriate values.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.excitability is not None: self.excitability.data.uniform_(1, 1) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features) + ')' def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
mhmorta/continual-learning-1
LinearExcitability
false
4,004
[ "MIT" ]
0
959d5238d4dd015245592993b5d044572ab58c90
https://github.com/mhmorta/continual-learning-1/tree/959d5238d4dd015245592993b5d044572ab58c90
HighwayMaxoutNetwork
import torch import torch.nn as nn import torch.nn.functional as F def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class HighwayMaxoutNetwork(nn.Module): """HMN network for dynamic decoder. Based on the Co-attention paper: Args: mod_out_size (int): Size of each position of the modeling-layer output. hidden_size (int): Size of hidden activations. max_out_pool_size (int): Pool size of the maxout layers. """ def __init__(self, mod_out_size, hidden_size, max_out_pool_size): super(HighwayMaxoutNetwork, self).__init__() self.hidden_size = hidden_size self.maxout_pool_size = max_out_pool_size self.r = nn.Linear(2 * mod_out_size + hidden_size, hidden_size, bias=False) self.W1 = nn.Linear(mod_out_size + hidden_size, max_out_pool_size * hidden_size) self.W2 = nn.Linear(hidden_size, max_out_pool_size * hidden_size) self.W3 = nn.Linear(2 * hidden_size, max_out_pool_size) def forward(self, mod, h_i, u_s_prev, u_e_prev, mask): batch_size, seq_len, _mod_out_size = mod.shape r = torch.tanh(self.r(torch.cat((h_i, u_s_prev, u_e_prev), 1))) r_expanded = r.unsqueeze(1).expand(batch_size, seq_len, self.hidden_size).contiguous() W1_inp = torch.cat((mod, r_expanded), 2) m_t_1 = self.W1(W1_inp) m_t_1 = m_t_1.view(batch_size, seq_len, self.maxout_pool_size, self.hidden_size) m_t_1, _ = m_t_1.max(2) assert m_t_1.shape == (batch_size, seq_len, self.hidden_size) m_t_2 = self.W2(m_t_1) m_t_2 = m_t_2.view(batch_size, seq_len, self.maxout_pool_size, self.hidden_size) m_t_2, _ = m_t_2.max(2) alpha_in = torch.cat((m_t_1, m_t_2), 2) alpha = self.W3(alpha_in) logits, _ = alpha.max(2) log_p = masked_softmax(logits, mask, log_softmax=True) return log_p def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'mod_out_size': 4, 'hidden_size': 4, 'max_out_pool_size': 4}]
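A smoke-test sketch for the HMN (assumed values mirroring get_inputs() above): it emits one log-probability per context position, so the output is (batch_size, seq_len) and each row of exp(log_p) sums to 1.

import torch

hmn = HighwayMaxoutNetwork(mod_out_size=4, hidden_size=4, max_out_pool_size=4)
mod = torch.rand(4, 4, 4)                       # modeling-layer output
h_i, u_s_prev, u_e_prev = (torch.rand(4, 4) for _ in range(3))
mask = torch.ones(4, 4)                         # all positions valid
log_p = hmn(mod, h_i, u_s_prev, u_e_prev, mask)
assert log_p.shape == (4, 4)
torch.testing.assert_close(log_p.exp().sum(-1), torch.ones(4))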
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x2 = xindex // 32 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp32 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = triton_helpers.maximum(tmp0, tmp1) tmp48 = triton_helpers.maximum(tmp47, tmp17) tmp49 = triton_helpers.maximum(tmp48, tmp32) tl.store(out_ptr0 + x2, tmp46, xmask) tl.store(out_ptr1 + x2, tmp49, xmask) @triton.jit def triton_poi_fused_max_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp32 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr1 + (4 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.load(in_ptr1 + (8 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tl.load(in_ptr1 + (12 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp6, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp5, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_add_max_mul_rsub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp47 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp48 = triton_helpers.maximum(tmp0, tmp1) tmp49 = triton_helpers.maximum(tmp48, tmp17) tmp50 = triton_helpers.maximum(tmp49, tmp32) tmp51 = tmp47 * tmp50 tmp52 = 1.0 tmp53 = tmp52 - tmp47 tmp54 = -1e+30 tmp55 = tmp53 * tmp54 tmp56 = tmp51 + tmp55 tl.store(out_ptr0 + x0, tmp46, xmask) tl.store(out_ptr1 + x0, tmp56, xmask) @triton.jit def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 12), (12, 1)) assert_size_stride(primals_6, (16, 8), (8, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4, 8), (8, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_2, primals_3, primals_4, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 del primals_4 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_5, (12, 4), (1, 12), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_1, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (16, 8), ( 8, 1), 0), reinterpret_tensor(primals_6, (8, 16), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_max_2[grid(64)](buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf3 del buf3 extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) 
triton_poi_fused_max_3[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_4[grid(128)](buf5, buf6, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf8, (16, 8), (8, 1), 0), reinterpret_tensor(primals_10, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf9) del primals_11 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.int64) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_max_mul_rsub_5[grid(16)](buf9, primals_12, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf9 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(16)](buf11, buf12, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused__log_softmax_7[grid(16)](buf12, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf12 return buf13, primals_12, buf0, buf1, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf8, (16, 8), (8, 1), 0 ), buf13, reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 1), 0 ), primals_10, reinterpret_tensor(buf7, (4, 4, 1, 4), (16, 4, 4, 1), 0 ), primals_8, reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0 ), primals_6 def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class HighwayMaxoutNetworkNew(nn.Module): """HMN network for dynamic decoder. Based on the Co-attention paper: Args: num_layers (int): Number of layers in the highway encoder. hidden_size (int): Size of hidden activations. """ def __init__(self, mod_out_size, hidden_size, max_out_pool_size): super(HighwayMaxoutNetworkNew, self).__init__() self.hidden_size = hidden_size self.maxout_pool_size = max_out_pool_size None self.r = nn.Linear(2 * mod_out_size + hidden_size, hidden_size, bias=False) self.W1 = nn.Linear(mod_out_size + hidden_size, max_out_pool_size * hidden_size) self.W2 = nn.Linear(hidden_size, max_out_pool_size * hidden_size) self.W3 = nn.Linear(2 * hidden_size, max_out_pool_size) def forward(self, input_0, input_1, input_2, input_3, input_4): primals_5 = self.r.weight primals_6 = self.W1.weight primals_7 = self.W1.bias primals_8 = self.W2.weight primals_9 = self.W2.bias primals_10 = self.W3.weight primals_11 = self.W3.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_4 = input_3 primals_12 = input_4 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
mayankiitg/cs224n
HighwayMaxoutNetwork
false
4,005
[ "MIT" ]
0
c67b7904101c8f19a5a231e4fe521e764470d41b
https://github.com/mayankiitg/cs224n/tree/c67b7904101c8f19a5a231e4fe521e764470d41b
CoAttention
import torch import torch.nn as nn import torch.nn.functional as F def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class CoAttention(nn.Module): """Dynamic co-attention. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention, coattention, c2q_coattention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 6 * hidden_size) when the encodings have width hidden_size. Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super().__init__() self.drop_prob = drop_prob self.linear = nn.Linear(hidden_size, hidden_size) self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, c, q, c_mask, q_mask): batch_size, c_len, _ = c.size() q_len = q.size(1) s = self.get_similarity_matrix(c, q) c_mask = c_mask.view(batch_size, c_len, 1) q_mask = q_mask.view(batch_size, 1, q_len) s1 = masked_softmax(s, q_mask, dim=2) s2 = masked_softmax(s, c_mask, dim=1) qprime = torch.tanh(self.linear(q)) scoat = torch.matmul(c, qprime.transpose(1, 2)) scoat1 = masked_softmax(scoat, q_mask, dim=2) scoat2 = masked_softmax(scoat, c_mask, dim=1) a = torch.bmm(s1, q) b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c) acoat = torch.bmm(scoat1, qprime) bcoat = torch.bmm(scoat2.transpose(1, 2), c) scoat = torch.bmm(scoat1, bcoat) x = torch.cat([c, a, c * a, c * b, scoat, acoat], dim=2) return x def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation.
See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
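A shape-check sketch for CoAttention (assumed values from this record's get_inputs(); eval() is used so the dropout inside get_similarity_matrix is disabled):

import torch

coatt = CoAttention(hidden_size=4).eval()
c, q = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
c_mask, q_mask = torch.ones(4, 4, 1), torch.ones(4, 1, 4)
x = coatt(c, q, c_mask, q_mask)
assert x.shape == (4, 4, 24)   # six width-4 blocks -> 6 * hidden_size

The (4, 4, 24) result agrees with the buf25 tensor of shape (4, 4, 24) allocated by the generated call() below.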
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp3 - tmp8 tmp12 = tmp11 * tmp5 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp17 = tmp15 * tmp16 tmp18 = tmp3 - tmp15 tmp19 = tmp18 * tmp5 tmp20 = tmp17 + tmp19 tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp24 = tmp22 * tmp23 tmp25 = tmp3 - tmp22 tmp26 = tmp25 * tmp5 tmp27 = tmp24 + tmp26 tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) 
tmp8 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp22 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp3 - tmp8 tmp12 = tmp11 * tmp5 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp17 = tmp15 * tmp16 tmp18 = tmp3 - tmp15 tmp19 = tmp18 * tmp5 tmp20 = tmp17 + tmp19 tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp24 = tmp22 * tmp23 tmp25 = tmp3 - tmp22 tmp26 = tmp25 * tmp5 tmp27 = tmp24 + tmp26 tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr3 + x4, xmask) tmp6 = tl.load(in_ptr4 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr6 + x4, xmask) tmp23 = tl.load(in_ptr7 + x3, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr8 + x3, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr9 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr10 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp0 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 * tmp8 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tmp21 = tmp0 * tmp20 tmp22 = tmp21 + tmp13 tmp24 = tmp22 - tmp23 tmp25 = tl_math.exp(tmp24) tmp27 = tmp25 / tmp26 tmp28 = tmp15 * tmp20 tmp29 = tmp28 + tmp18 tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp34 = tmp32 / tmp33 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) tl.store(out_ptr2 + x4, tmp27, xmask) tl.store(out_ptr3 + x4, tmp34, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') 
tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 24 x1 = xindex // 24 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= 
tmp12 tmp21 = tl.full([1], 16, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tmp20 & tmp22 tmp24 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 * tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tmp0 >= tmp21 tmp30 = tl.full([1], 20, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr3 + (4 * x1 + (-16 + x0)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp0 >= tmp30 tl.full([1], 24, tl.int64) tmp37 = tl.load(in_ptr4 + (4 * x1 + (-20 + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.where(tmp32, tmp33, tmp37) tmp39 = tl.where(tmp23, tmp28, tmp38) tmp40 = tl.where(tmp14, tmp19, tmp39) tmp41 = tl.where(tmp9, tmp10, tmp40) tmp42 = tl.where(tmp4, tmp5, tmp41) tl.store(out_ptr0 + x2, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf3) buf10 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0) del buf2 extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf10) del primals_9 buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_tanh_1[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(buf11, (4, 4, 4), (16, 1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_add_mul_rsub_2[grid(16)](primals_8, buf12, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) triton_poi_fused__softmax_add_mul_rsub_3[grid(16)](primals_7, buf12, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf15 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_mul_rsub_4[grid(64)](primals_8, buf0, buf1, buf3, primals_6, primals_7, buf12, buf13, buf14, buf16, buf17, buf4, buf7, buf15, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 del buf13 del buf14 del buf16 del buf17 del primals_6 buf5 = buf3 del buf3 triton_poi_fused__softmax_5[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_6[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf5 del buf5 triton_poi_fused__softmax_7[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_8[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = buf8 del buf8 extern_kernels.bmm(buf6, primals_2, out=buf18) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf19, primals_1, out=buf20) buf21 = buf19 del buf19 extern_kernels.bmm(buf15, buf11, out=buf21) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf22, (4, 4, 4), (16, 1, 4), 0), primals_1, out=buf23) buf24 = buf22 del buf22 extern_kernels.bmm(buf15, buf23, out=buf24) buf25 = empty_strided_cuda((4, 4, 24), (96, 24, 1), torch.float32) triton_poi_fused_cat_9[grid(384)](primals_1, buf18, buf20, buf24, buf21, buf25, 384, XBLOCK=256, num_warps=4, num_stages=1) del buf18 del buf20 del buf21 del buf24 return (buf25, primals_1, primals_2, primals_7, primals_8, buf6, buf9, buf11, buf12, buf15, reinterpret_tensor(buf23, (4, 4, 4), (16, 1, 4 ), 0)) def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class CoAttentionNew(nn.Module): """Dynamic co-attention. Bidirectional attention computes attention in two directions: the context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations.
""" def __init__(self, hidden_size, drop_prob=0.1): super().__init__() self.drop_prob = drop_prob self.linear = nn.Linear(hidden_size, hidden_size) self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.c_weight primals_4 = self.q_weight primals_5 = self.cq_weight primals_6 = self.bias primals_9 = self.linear.weight primals_10 = self.linear.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
mayankiitg/cs224n
CoAttention
false
4,006
[ "MIT" ]
0
c67b7904101c8f19a5a231e4fe521e764470d41b
https://github.com/mayankiitg/cs224n/tree/c67b7904101c8f19a5a231e4fe521e764470d41b
MySmallModel
import torch import torch.nn as nn class MySmallModel(nn.Module): def __init__(self, nodes): super().__init__() hidden_nodes = nodes * 2 self.fc1 = nn.Linear(nodes, hidden_nodes) self.fc2 = nn.Linear(hidden_nodes, nodes) self.fc3 = nn.Linear(nodes, 1) def forward(self, x): x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) x = torch.relu(self.fc3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nodes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(64)](buf5, primals_7, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class MySmallModelNew(nn.Module): def __init__(self, nodes): super().__init__() hidden_nodes = nodes * 2 self.fc1 = nn.Linear(nodes, hidden_nodes) self.fc2 = nn.Linear(hidden_nodes, nodes) self.fc3 = nn.Linear(nodes, 1) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
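A quick eager-mode shape check (the class is copied from the record above so the snippet runs standalone): nn.Linear acts on the trailing dimension, so the [4, 4, 4, 4] input from get_inputs() yields a [4, 4, 4, 1] output, matching the (4, 4, 4, 1) buffer returned by the generated call().

import torch
import torch.nn as nn


class MySmallModel(nn.Module):
    def __init__(self, nodes):
        super().__init__()
        hidden_nodes = nodes * 2
        self.fc1 = nn.Linear(nodes, hidden_nodes)
        self.fc2 = nn.Linear(hidden_nodes, nodes)
        self.fc3 = nn.Linear(nodes, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return torch.relu(self.fc3(x))


out = MySmallModel(nodes=4)(torch.rand(4, 4, 4, 4))
print(out.shape)  # torch.Size([4, 4, 4, 1])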
minister19/RL_pytorch_get_started
MySmallModel
false
4,007
[ "MIT" ]
0
e444f524a14d329f9a25c53f102bc96c4ea36ad8
https://github.com/minister19/RL_pytorch_get_started/tree/e444f524a14d329f9a25c53f102bc96c4ea36ad8
AttentionLayer
import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import * class AttentionLayer(nn.Module): def __init__(self, hidden_dim_en, hidden_dim_de, projected_size): super(AttentionLayer, self).__init__() self.linear1 = nn.Linear(hidden_dim_en, projected_size) self.linear2 = nn.Linear(hidden_dim_de, projected_size) self.linear3 = nn.Linear(projected_size, 1, False) def forward(self, out_e, h): """ out_e: batch_size * num_frames * en_hidden_dim h : batch_size * de_hidden_dim """ assert out_e.size(0) == h.size(0) batch_size, num_frames, _ = out_e.size() hidden_dim = h.size(1) h_att = h.unsqueeze(1).expand(batch_size, num_frames, hidden_dim) x = F.tanh(F.dropout(self.linear1(out_e)) + F.dropout(self.linear2( h_att))) x = F.dropout(self.linear3(x)) a = F.softmax(x.squeeze(2)) return a def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'hidden_dim_en': 4, 'hidden_dim_de': 4, 'projected_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) 
assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf0, (4, 4, 4), (16, 4, 1), 0), 0.5, True) buf2 = buf1[0] buf3 = buf1[1] del buf1 buf4 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf5) del primals_5 buf6 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0) del buf5 triton_poi_fused_add_1[grid(64)](buf6, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf7 = torch.ops.aten.native_dropout.default(buf6, 0.5, True) del buf6 buf8 = buf7[0] buf9 = buf7[1] del buf7 buf10 = buf2 del buf2 triton_poi_fused_add_tanh_2[grid(64)](buf10, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), out=buf11) buf12 = torch.ops.aten.native_dropout.default(reinterpret_tensor( buf11, (4, 4, 1), (4, 1, 1), 0), 0.5, True) buf13 = buf12[0] buf14 = buf12[1] del buf12 buf15 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0) del buf11 triton_poi_fused__softmax_3[grid(16)](buf13, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0) del buf13 triton_poi_fused__softmax_4[grid(16)](buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf15 return buf16, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf3, reinterpret_tensor(buf4, (16, 4), (4, 1), 0 ), buf9, buf10, buf14, buf16, primals_7 class AttentionLayerNew(nn.Module): def __init__(self, hidden_dim_en, hidden_dim_de, projected_size): super(AttentionLayerNew, self).__init__() self.linear1 = nn.Linear(hidden_dim_en, projected_size) self.linear2 = nn.Linear(hidden_dim_de, projected_size) self.linear3 = nn.Linear(projected_size, 1, False) def forward(self, input_0, input_1): primals_2 = self.linear1.weight primals_4 = self.linear1.bias primals_3 = self.linear2.weight primals_6 = self.linear2.bias primals_7 = self.linear3.weight primals_1 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
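A usage sketch for the eager module (this assumes the AttentionLayer class from the record above is in scope). One caveat worth noting: F.dropout defaults to training=True, so dropout stays active here even under model.eval(); the final softmax still returns a distribution over frames.

import torch

layer = AttentionLayer(hidden_dim_en=4, hidden_dim_de=4, projected_size=4)
out_e = torch.rand(4, 4, 4)  # batch_size x num_frames x en_hidden_dim
h = torch.rand(4, 4)         # batch_size x de_hidden_dim
a = layer(out_e, h)
print(a.shape)   # torch.Size([4, 4]): one attention weight per frame
print(a.sum(-1)) # each row sums to ~1.0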
minhdo3000/visual_storytelling
AttentionLayer
false
4,008
[ "MIT" ]
0
451c5194564fb1bb02929f57eac8f026662637b1
https://github.com/minhdo3000/visual_storytelling/tree/451c5194564fb1bb02929f57eac8f026662637b1
ELBOLoss
import torch from torch import nn class ELBOLoss(nn.Module): def __init__(self): super(ELBOLoss, self).__init__() self.recons_loss = nn.BCELoss(reduction='sum') def forward(self, reconstruction, x, mu, log_var): loss = -self.recons_loss(reconstruction, x) KL_loss = 0.5 * torch.sum(-1 - log_var + mu ** 2 + log_var.exp()) return -(loss - KL_loss) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_exp_mul_neg_pow_rsub_sub_sum_0( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp16 = tl.load(in_ptr2 + r0, None) tmp19 = tl.load(in_ptr3 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp17 = -1.0 tmp18 = tmp17 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tl_math.exp(tmp16) tmp23 = tmp21 + tmp22 tmp24 = tl.broadcast_to(tmp23, [RBLOCK]) tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0)) tmp27 = -tmp15 tmp28 = 0.5 tmp29 = tmp26 * tmp28 tmp30 = tmp27 - tmp29 tmp31 = -tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_exp_mul_neg_pow_rsub_sub_sum_0[ grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class ELBOLossNew(nn.Module): def __init__(self): super(ELBOLossNew, self).__init__() self.recons_loss = nn.BCELoss(reduction='sum') def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
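The fused kernel above folds the BCE and KL reductions into a single pass; in eager PyTorch the returned value simplifies to the summed BCE plus the Gaussian KL term, since -(-BCE - KL) = BCE + KL. A standalone CPU check (the class is copied from the record above):

import torch
from torch import nn


class ELBOLoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.recons_loss = nn.BCELoss(reduction='sum')

    def forward(self, reconstruction, x, mu, log_var):
        loss = -self.recons_loss(reconstruction, x)
        KL_loss = 0.5 * torch.sum(-1 - log_var + mu ** 2 + log_var.exp())
        return -(loss - KL_loss)


recon, x = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
mu, log_var = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
loss = ELBOLoss()(recon, x, mu, log_var)
# Closed form of the same quantity: summed BCE plus the Gaussian KL term.
manual = nn.BCELoss(reduction='sum')(recon, x) + 0.5 * torch.sum(
    -1 - log_var + mu ** 2 + log_var.exp())
assert torch.allclose(loss, manual)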
mirmohammad/IFT6135-TP3
ELBOLoss
false
4,009
[ "MIT" ]
0
70453b4ea695313837ab88243b0206552eb50632
https://github.com/mirmohammad/IFT6135-TP3/tree/70453b4ea695313837ab88243b0206552eb50632
JSDLoss
import math import torch from torch import nn class JSDLoss(nn.Module): def __init__(self): super(JSDLoss, self).__init__() def forward(self, d_x, d_y): return -(math.log(2.0) + 0.5 * (torch.mean(torch.log(d_x)) + torch. mean(torch.log(1.0 - d_y)))) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_neg_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp1 = tl_math.log(tmp0) tmp2 = tl.broadcast_to(tmp1, [RBLOCK]) tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0)) tmp6 = 1.0 tmp7 = tmp6 - tmp5 tmp8 = tl_math.log(tmp7) tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 256.0 tmp13 = tmp4 / tmp12 tmp14 = tmp11 / tmp12 tmp15 = tmp13 + tmp14 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = 0.6931471805599453 tmp19 = tmp17 + tmp18 tmp20 = -tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_rsub_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class JSDLossNew(nn.Module): def __init__(self): super(JSDLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
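For reference, the value the single fused kernel produces can be reproduced in eager PyTorch directly from the formula; the constant 0.6931471805599453 baked into the kernel is just log(2), and the 256.0 divisor is the element count 4*4*4*4 of each mean. A minimal check:

import math
import torch

d_x, d_y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = -(math.log(2.0) + 0.5 * (torch.mean(torch.log(d_x))
                                + torch.mean(torch.log(1.0 - d_y))))
assert d_x.numel() == 256                           # the kernel's mean divisor
assert math.isclose(0.6931471805599453, math.log(2.0))  # the kernel's constant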
mirmohammad/IFT6135-TP3
JSDLoss
false
4,010
[ "MIT" ]
0
70453b4ea695313837ab88243b0206552eb50632
https://github.com/mirmohammad/IFT6135-TP3/tree/70453b4ea695313837ab88243b0206552eb50632
Upsample
import torch from torch import nn class Upsample(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(1024)](buf1, primals_2, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class UpsampleNew(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
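A quick shape check (eager, CPU) of why the generated call() asserts a (4, 4, 8, 8) output: a transposed conv with kernel 4, stride 2, padding 1 maps H to (H - 1) * 2 - 2 * 1 + 4 = 2H, i.e. it exactly doubles the spatial dimensions.

import torch
from torch import nn

conv = nn.ConvTranspose2d(4, 4, 4, 2, 1)  # same config as Upsample(dim=4)
x = torch.rand(4, 4, 4, 4)
print(conv(x).shape)  # torch.Size([4, 4, 8, 8])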
mishooax/denoising-diffusion-pytorch
Upsample
false
4,011
[ "MIT" ]
0
54df92c06c5cb0dc3bb43232c24c492c6f5a35c7
https://github.com/mishooax/denoising-diffusion-pytorch/tree/54df92c06c5cb0dc3bb43232c24c492c6f5a35c7
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv2 = nn.Conv2d(3, 64, 8, 2, 3) self.conv3 = nn.Conv2d(64, 128, 6, 2, 2) self.conv4 = nn.Conv2d(128, 256, 4, 2, 1) self.conv5 = nn.Conv2d(256, 512, 4, 2, 1) self.fc1 = nn.Linear(512, 4096) self.conv6 = nn.Conv2d(256, 512, 3, 1, 1) self.conv7 = nn.Conv2d(512, 512, 3, 1, 1) self.conv8 = nn.Conv2d(512, 512, 3, 1, 1) self.branch1_fc1 = nn.Conv2d(512, 512, 1, 1, 0) self.branch1_fc2 = nn.Conv2d(512, 512, 1, 1, 0) self.branch1_fc3 = nn.Conv2d(512, 300, 1, 1, 0) self.branch2_fc1 = nn.Conv2d(512, 512, 1, 1, 0) self.branch2_fc2 = nn.Conv2d(512, 512, 1, 1, 0) self.branch2_fc3 = nn.Conv2d(512, 100, 1, 1, 0) def forward(self, x, interp_factor=1): x = F.relu(self.conv2(x)) x = F.relu(self.conv3(x)) x = F.relu(self.conv4(x)) x = F.relu(self.conv5(x)) x = torch.tanh(F.max_pool2d(x, 8)) x = x.view(-1, 1 * 1 * 512) x = F.relu(self.fc1(x)) x = x.view(-1, 256, 4, 4) x = F.relu(self.conv6(x)) x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners =False) x = F.relu(self.conv7(x)) x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners =False) x = F.relu(self.conv8(x)) x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners =False) if interp_factor != 1: x = F.interpolate(x, scale_factor=interp_factor, mode= 'bilinear', align_corners=False) branch1_x = F.relu(self.branch1_fc1(x)) branch1_x = F.relu(self.branch1_fc2(branch1_x)) branch1_x = self.branch1_fc3(branch1_x) branch1_x = branch1_x.view(-1, 100, 3, 32 * interp_factor, 32 * interp_factor) branch2_x = F.relu(self.branch2_fc1(x)) branch2_x = F.relu(self.branch2_fc2(branch2_x)) branch2_x = self.branch2_fc3(branch2_x) branch2_x = branch2_x.view(-1, 100, 1, 32 * interp_factor, 32 * interp_factor) x = torch.cat([branch1_x, branch2_x], 2) return x def get_inputs(): return [torch.rand([4, 3, 128, 128])] def get_init_inputs(): return [[], {}]
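A hedged shape walk-through for the default path, interp_factor=1 (this assumes the Net class above is in scope): the 128x128 input is downsampled by the strided convs to an 8x8x512 map, max-pooled to a 1x1x512 code, expanded via fc1 to a 4x4x256 map, bilinearly upsampled three times to 32x32, and the 300- and 100-channel branch outputs are reshaped and concatenated along dim 2.

import torch

net = Net()
with torch.no_grad():
    y = net(torch.rand(1, 3, 128, 128))
print(y.shape)  # torch.Size([1, 100, 4, 32, 32]): 3 + 1 channels per group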
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 192 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 16384 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 49152 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 36 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_tanh_11(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, None) @triton.jit def triton_poi_fused_relu_threshold_backward_view_12(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_out_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (x2 + 16 * y0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp4, xmask) tl.store(out_ptr1 + (x2 + 16 * y3), tmp6, xmask) @triton.jit def triton_poi_fused__to_copy_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_15(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex // 8 x2 = xindex % 8 y0 = yindex % 512 y1 = yindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + y0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x3, xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, YBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (y0 
+ 512 * tmp8 + 2048 * tmp4 + 8192 * y1), xmask ) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1, 1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 2048 * tmp4 + 8192 * y1), xmask) tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (y0 + 512 * tmp8 + 2048 * tmp28 + 8192 * y1), xmask) tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 2048 * tmp28 + 8192 * y1), xmask) tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (y0 + 512 * x4 + 32768 * y1), tmp41, xmask) @triton.jit def triton_poi_fused__to_copy_17(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 7, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_19(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex // 16 x2 = xindex % 16 y0 = yindex % 512 y1 = yindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + y0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x3, xmask, 
eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, YBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (y0 + 512 * tmp8 + 4096 * tmp4 + 32768 * y1), xmask) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1, 1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 4096 * tmp4 + 32768 * y1), xmask) tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (y0 + 512 * tmp8 + 4096 * tmp28 + 32768 * y1), xmask) tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 4096 * tmp28 + 32768 * y1 ), xmask) tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (y0 + 512 * x4 + 131072 * y1), tmp41, xmask) @triton.jit def triton_poi_fused__to_copy_21(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_22(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_23(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_24(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex // 32 x2 = xindex % 32 y0 = yindex % 512 y1 = yindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') 
tmp5 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + y0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x3, xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr7 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, YBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (y0 + 512 * tmp8 + 8192 * tmp4 + 131072 * y1), xmask) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1, 1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = tmp14 + tmp1 tmp16 = tmp14 < 0 tmp17 = tl.where(tmp16, tmp15, tmp14) tmp18 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 8192 * tmp4 + 131072 * y1 ), xmask) tmp19 = tmp18 + tmp10 tmp20 = triton_helpers.maximum(tmp12, tmp19) tmp21 = tmp20 - tmp13 tmp23 = tmp21 * tmp22 tmp24 = tmp13 + tmp23 tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (y0 + 512 * tmp8 + 8192 * tmp28 + 131072 * y1 ), xmask) tmp30 = tmp29 + tmp10 tmp31 = triton_helpers.maximum(tmp12, tmp30) tmp32 = tl.load(in_ptr2 + (y0 + 512 * tmp17 + 8192 * tmp28 + 131072 * y1), xmask) tmp33 = tmp32 + tmp10 tmp34 = triton_helpers.maximum(tmp12, tmp33) tmp35 = tmp34 - tmp31 tmp36 = tmp35 * tmp22 tmp37 = tmp31 + tmp36 tmp38 = tmp37 - tmp24 tmp40 = tmp38 * tmp39 tmp41 = tmp24 + tmp40 tl.store(out_ptr2 + (y0 + 512 * x4 + 524288 * y1), tmp41, xmask) @triton.jit def triton_poi_fused_convolution_relu_25(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_cat_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 4 x0 = xindex % 1024 x2 = xindex // 4096 % 100 x3 = xindex // 409600 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (3 * x2 + 300 * x0 + 307200 * x3 + x1), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (3 * x2 + x1), tmp4, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp13 = tl.load(in_ptr2 + (x2 + 100 * x0 + 102400 * x3), tmp10, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr3 + x2, tmp10, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x4, tmp18, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_27(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) 
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_29(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (64, 3, 8, 8), (192, 64, 8, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 128, 128), (49152, 16384, 128, 1)) assert_size_stride(primals_4, (128, 64, 6, 6), (2304, 36, 6, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (4096, 512), (512, 1)) assert_size_stride(primals_11, (4096,), (1,)) assert_size_stride(primals_12, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (512,), (1,)) assert_size_stride(primals_14, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (300, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_23, (300,), (1,)) assert_size_stride(primals_24, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (100, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_29, (100,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 8, 8), (192, 1, 24, 3), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(192, 64)](primals_1, buf0, 192, 64, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = 
empty_strided_cuda((4, 3, 128, 128), (49152, 1, 384, 3), torch.float32) triton_poi_fused_1[grid(12, 16384)](primals_3, buf1, 12, 16384, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 6, 6), (2304, 1, 384, 64), torch.float32) triton_poi_fused_2[grid(8192, 36)](primals_4, buf2, 8192, 36, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_3[grid(32768, 16)](primals_6, buf3, 32768, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((512, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_4[grid(131072, 16)](primals_8, buf4, 131072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_5[grid(131072, 9)](primals_12, buf5, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_6[grid(262144, 9)](primals_14, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_6[grid(262144, 9)](primals_16, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf8 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_7[grid(1048576)](buf9, primals_2, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_8[grid(524288)](buf11, primals_5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_9[grid(262144)](buf13, primals_7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_10[grid(131072)](buf15, primals_9, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf16 = torch.ops.aten.max_pool2d_with_indices.default(buf15, [8, 8]) buf17 = buf16[0] buf18 = buf16[1] del buf16 buf19 = buf17 del buf17 triton_poi_fused_tanh_11[grid(2048)](buf19, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf20 = empty_strided_cuda((4, 4096), (4096, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf19, (4, 512), (512, 1), 0), reinterpret_tensor(primals_10, (512, 4096), (1, 512), 0), out=buf20 ) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) buf67 = empty_strided_cuda((4, 4096), (4096, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_view_12[grid(1024, 16)](buf21, primals_11, buf22, buf67, 1024, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf21 del primals_11 buf23 = extern_kernels.convolution(buf22, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf24 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_13[grid(8)](buf24, 8, XBLOCK=8, num_warps =1, num_stages=1) buf25 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_14[grid(8)](buf25, 8, XBLOCK=8, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_13[grid(8)](buf26, 8, XBLOCK=8, num_warps =1, num_stages=1) buf27 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_14[grid(8)](buf27, 8, XBLOCK=8, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_15[grid(8)](buf28, 8, XBLOCK=8, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_15[grid(8)](buf30, 8, XBLOCK=8, num_warps=1, num_stages=1) buf32 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_16[grid (2048, 64)](buf24, buf26, buf23, primals_13, buf27, buf28, buf25, buf30, buf32, 2048, 64, XBLOCK=32, YBLOCK=32, num_warps= 4, num_stages=1) buf33 = extern_kernels.convolution(buf32, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf34 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_17[grid(16)](buf34, 16, XBLOCK=16, num_warps=1, num_stages=1) buf35 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_18[grid(16)](buf35, 16, XBLOCK=16, num_warps=1, num_stages=1) buf36 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_17[grid(16)](buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_add_clamp_18[grid(16)](buf37, 16, XBLOCK=16, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_19[grid(16)](buf38, 16, XBLOCK=16, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_19[grid(16)](buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_20[grid (2048, 256)](buf34, buf36, buf33, primals_15, buf37, buf38, buf35, buf40, buf42, 2048, 256, XBLOCK=1, YBLOCK=1024, num_warps=4, num_stages=1) buf43 = extern_kernels.convolution(buf42, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf44 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_21[grid(32)](buf44, 32, XBLOCK=32, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_22[grid(32)](buf45, 32, XBLOCK=32, num_warps=1, num_stages=1) buf46 = 
empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_21[grid(32)](buf46, 32, XBLOCK=32, num_warps=1, num_stages=1) buf47 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_22[grid(32)](buf47, 32, XBLOCK=32, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_23[grid(32)](buf48, 32, XBLOCK=32, num_warps=1, num_stages=1) buf50 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_23[grid(32)](buf50, 32, XBLOCK=32, num_warps=1, num_stages=1) buf52 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512 ), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_relu_sub_24[grid (2048, 1024)](buf44, buf46, buf43, primals_17, buf47, buf48, buf45, buf50, buf52, 2048, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf53 = extern_kernels.convolution(buf52, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf54 = buf53 del buf53 triton_poi_fused_convolution_relu_25[grid(2097152)](buf54, primals_19, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf55 = extern_kernels.convolution(buf54, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf56 = buf55 del buf55 triton_poi_fused_convolution_relu_25[grid(2097152)](buf56, primals_21, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf57 = extern_kernels.convolution(buf56, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 300, 32, 32), (307200, 1, 9600, 300)) buf58 = extern_kernels.convolution(buf52, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf59 = buf58 del buf58 triton_poi_fused_convolution_relu_25[grid(2097152)](buf59, primals_25, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf60 = extern_kernels.convolution(buf59, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 512, 32, 32), (524288, 1, 16384, 512)) buf61 = buf60 del buf60 triton_poi_fused_convolution_relu_25[grid(2097152)](buf61, primals_27, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_27 buf62 = extern_kernels.convolution(buf61, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 100, 32, 32), (102400, 1, 3200, 100)) buf63 = empty_strided_cuda((4, 100, 4, 32, 32), (409600, 4096, 1024, 32, 1), torch.float32) triton_poi_fused_cat_26[grid(1638400)](buf57, primals_23, buf62, primals_29, buf63, 1638400, XBLOCK=512, num_warps=8, num_stages=1) del buf57 del buf62 del primals_23 del primals_29 buf64 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_27[grid(524288)]( buf43, primals_17, buf64, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf43 del primals_17 buf65 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.bool) 
triton_poi_fused_convolution_relu_threshold_backward_28[grid(131072)]( buf33, primals_15, buf65, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf33 del primals_15 buf66 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_29[grid(32768)]( buf23, primals_13, buf66, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf23 del primals_13 return (buf63, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, buf9, buf11, buf13, buf15, buf18, buf19, buf22, buf24, buf25, buf26, buf27, buf28, buf30, buf32, buf34, buf35, buf36, buf37, buf38, buf40, buf42, buf44, buf45, buf46, buf47, buf48, buf50, buf52, buf54, buf56, buf59, buf61, buf64, buf65, buf66, buf67, primals_10) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv2 = nn.Conv2d(3, 64, 8, 2, 3) self.conv3 = nn.Conv2d(64, 128, 6, 2, 2) self.conv4 = nn.Conv2d(128, 256, 4, 2, 1) self.conv5 = nn.Conv2d(256, 512, 4, 2, 1) self.fc1 = nn.Linear(512, 4096) self.conv6 = nn.Conv2d(256, 512, 3, 1, 1) self.conv7 = nn.Conv2d(512, 512, 3, 1, 1) self.conv8 = nn.Conv2d(512, 512, 3, 1, 1) self.branch1_fc1 = nn.Conv2d(512, 512, 1, 1, 0) self.branch1_fc2 = nn.Conv2d(512, 512, 1, 1, 0) self.branch1_fc3 = nn.Conv2d(512, 300, 1, 1, 0) self.branch2_fc1 = nn.Conv2d(512, 512, 1, 1, 0) self.branch2_fc2 = nn.Conv2d(512, 512, 1, 1, 0) self.branch2_fc3 = nn.Conv2d(512, 100, 1, 1, 0) def forward(self, input_0): primals_1 = self.conv2.weight primals_2 = self.conv2.bias primals_4 = self.conv3.weight primals_5 = self.conv3.bias primals_6 = self.conv4.weight primals_7 = self.conv4.bias primals_8 = self.conv5.weight primals_9 = self.conv5.bias primals_10 = self.fc1.weight primals_11 = self.fc1.bias primals_12 = self.conv6.weight primals_13 = self.conv6.bias primals_14 = self.conv7.weight primals_15 = self.conv7.bias primals_16 = self.conv8.weight primals_17 = self.conv8.bias primals_18 = self.branch1_fc1.weight primals_19 = self.branch1_fc1.bias primals_20 = self.branch1_fc2.weight primals_21 = self.branch1_fc2.bias primals_22 = self.branch1_fc3.weight primals_23 = self.branch1_fc3.bias primals_24 = self.branch2_fc1.weight primals_25 = self.branch2_fc1.bias primals_26 = self.branch2_fc2.weight primals_27 = self.branch2_fc2.bias primals_28 = self.branch2_fc3.weight primals_29 = self.branch2_fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0]
leduchuy225/HairNet
Net
false
4012
[ "MIT" ]
0
2d3f0b82a686d2ccc7fee4429ef5925ffabd8982
https://github.com/leduchuy225/HairNet/tree/2d3f0b82a686d2ccc7fee4429ef5925ffabd8982
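A minimal smoke test for the record above, not part of the dataset itself: it drives the Inductor-generated NetNew once with a random batch matching the (4, 3, 128, 128) shape that call() asserts for primals_3. The variable names and the use of torch.no_grad() are illustrative assumptions; a CUDA device is required because call() pins device 0.

import torch

# Hypothetical usage sketch (not from the original repo): run the compiled
# module once and check the output shape of buf63 returned by call().
model = NetNew().cuda()
x = torch.rand(4, 3, 128, 128, device='cuda')
with torch.no_grad():
    out = model(x)
assert out.shape == (4, 100, 4, 32, 32)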
Attention
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class Norm(nn.Module): def __init__(self, dim_seq, input_size, eps=1e-06): super().__init__() self.size = input_size self.seq = dim_seq self.alpha = nn.Parameter(torch.ones((self.size, self.seq))) self.bias = nn.Parameter(torch.zeros((self.size, self.seq))) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim =-1, keepdim=True) + self.eps) + self.bias return norm class Attention(nn.Module): def __init__(self, dim_seq, input_size, dropout=0.1): super().__init__() self.dim_seq = dim_seq self.dk = input_size self.q_linear = nn.Linear(dim_seq, dim_seq) self.k_linear = nn.Linear(dim_seq, dim_seq) self.v_linear = nn.Linear(dim_seq, dim_seq) self.norm_1 = Norm(dim_seq, input_size) self.norm_2 = Norm(dim_seq, input_size) self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) def forward(self, s): s = self.norm_1(s).float() q = self.q_linear(s) k = self.k_linear(s) v = self.v_linear(s) scores = torch.matmul(q, k.transpose(-2, -1)) / np.sqrt(self.dk) scores = F.softmax(scores, dim=-1) scores = self.dropout_1(scores) output = torch.matmul(scores, v) s = self.norm_2(s + self.dropout_2(output)) return s def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_seq': 4, 'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x4, xmask) tmp2 = tl.load(in_ptr1 + 4 * x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x5), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x5), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_sqrt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 2.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp6 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp6 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp7 - tmp18 tmp20 = tmp6.to(tl.float64) tmp21 = tmp20 * tmp1 tmp22 = tmp21.to(tl.float32) tmp23 = tmp19 / tmp22 tmp24 = tl_math.exp(tmp23) tl.store(out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 buf3 
= empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_9 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_sqrt_1[grid(256)](buf4, buf5, 256, XBLOCK =256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused_add_3[grid(256)](buf8, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_10, buf8, primals_11, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 return buf9, primals_2, primals_10, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), buf6, buf8, reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0 ), primals_8, primals_6, primals_4 class Norm(nn.Module): def __init__(self, dim_seq, input_size, eps=1e-06): super().__init__() self.size = input_size self.seq = dim_seq self.alpha = nn.Parameter(torch.ones((self.size, self.seq))) self.bias = nn.Parameter(torch.zeros((self.size, self.seq))) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim =-1, keepdim=True) + self.eps) + self.bias return norm class AttentionNew(nn.Module): def __init__(self, dim_seq, input_size, dropout=0.1): super().__init__() self.dim_seq = dim_seq self.dk = input_size self.q_linear = nn.Linear(dim_seq, dim_seq) self.k_linear = nn.Linear(dim_seq, dim_seq) self.v_linear = nn.Linear(dim_seq, dim_seq) self.norm_1 = Norm(dim_seq, input_size) self.norm_2 = Norm(dim_seq, input_size) self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.q_linear.weight primals_5 = self.q_linear.bias primals_3 = self.k_linear.weight primals_7 = self.k_linear.bias primals_4 = self.v_linear.weight primals_9 = self.v_linear.bias primals_6 = self.norm_1.alpha primals_8 = self.norm_1.bias primals_10 = self.norm_2.alpha primals_11 = self.norm_2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
mingweima/hintplaygame
Attention
false
4013
[ "MIT" ]
0
31f35a22111a2e5e7e5d8e90f92326bc784c5fe7
https://github.com/mingweima/hintplaygame/tree/31f35a22111a2e5e7e5d8e90f92326bc784c5fe7
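A hedged smoke test for the Attention record, not in the dataset: it only confirms that the compiled AttentionNew accepts the (4, 4, 4, 4) input from get_inputs() and preserves that shape (buf9). No numerical-equivalence claim against the eager Attention is made here, since the primal-to-parameter assignment in AttentionNew.forward is taken on faith from the generator.

import torch

# Hypothetical shape check; eval() keeps the (unused) dropout modules inert.
attn = AttentionNew(dim_seq=4, input_size=4).cuda().eval()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    out = attn(x)
assert out.shape == (4, 4, 4, 4)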
Net
import torch from torch import Tensor from torch.functional import Tensor import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 60, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(60, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x: 'Tensor'): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 188160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 60 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 47040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 11760 x4 = xindex % 11760 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 11776 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 11776 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, 
tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (60, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (60,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 60, 5, 5), (1500, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 60, 28, 28), (47040, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(188160)](buf1, primals_2, 188160, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 60, 14, 14), (11776, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 60, 14, 14), (11776, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(47040)](buf1, buf2, buf3, 47040, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 60, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(60, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
minister19/RL_pytorch_get_started
Net
false
4014
[ "MIT" ]
0
e444f524a14d329f9a25c53f102bc96c4ea36ad8
https://github.com/minister19/RL_pytorch_get_started/tree/e444f524a14d329f9a25c53f102bc96c4ea36ad8
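A parity sketch for the LeNet-style record above, not part of the dataset: Net is deterministic (no dropout), so after copying the eager weights into NetNew the two paths should agree on the (4, 3, 32, 32) batch from get_inputs(). The tolerance is an assumption.

import torch

# Hypothetical parity check between the eager and compiled modules,
# sharing one set of randomly initialized weights.
eager = Net().cuda()
compiled = NetNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)  # (4, 10) logits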
SelfAttention
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class SelfAttention(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super().__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.p_weight1 = nn.Parameter(torch.zeros(4 * hidden_size, int(np. sqrt(hidden_size)))) self.p_weight2 = nn.Parameter(torch.zeros(4 * hidden_size, int(np. sqrt(hidden_size)))) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) for weight in (self.p_weight1, self.p_weight2): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def forward(self, c, q, c_mask, q_mask): batch_size, c_len, _ = c.size() q_len = q.size(1) s = self.get_similarity_matrix(c, q) c_mask = c_mask.view(batch_size, c_len, 1) q_mask = q_mask.view(batch_size, 1, q_len) s1 = masked_softmax(s, q_mask, dim=2) s2 = masked_softmax(s, c_mask, dim=1) a = torch.bmm(s1, q) b = torch.bmm(torch.bmm(s1, s2.transpose(1, 2)), c) x = torch.cat([c, a, c * a, c * b], dim=2) ss = self.get_self_similarity_matrix(x) ss1 = masked_softmax(ss, c_mask, dim=1) patt = torch.bmm(ss1, b) return patt def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. 
See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def get_self_similarity_matrix(self, b): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ b.size(1) b = F.dropout(b, self.drop_prob, self.training) s0 = torch.matmul(b, self.p_weight1) s1 = torch.matmul(b, self.p_weight2) s = torch.matmul(s0, s1.transpose(1, 2)) return s def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp4 = tl.load(in_ptr3 + x4, xmask) tmp6 = tl.load(in_ptr4 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp15 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp8 = tmp5 + tmp7 tmp9 = tmp0 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp0 tmp12 = -1e+30 tmp13 = tmp11 * tmp12 tmp14 = tmp9 + tmp13 tmp16 = tmp15 * tmp8 tmp17 = tmp10 - tmp15 tmp18 = tmp17 * tmp12 tmp19 = tmp16 + tmp18 tl.store(out_ptr0 + x4, tmp14, xmask) tl.store(out_ptr1 + x4, tmp19, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) 
@triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp14, tmp17, tmp18) tmp20 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp20, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp19, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp8 = 
tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp22 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp3 - tmp8 tmp12 = tmp11 * tmp5 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp17 = tmp15 * tmp16 tmp18 = tmp3 - tmp15 tmp19 = tmp18 * tmp5 tmp20 = tmp17 + tmp19 tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp24 = tmp22 * tmp23 tmp25 = tmp3 - tmp22 tmp26 = tmp25 * tmp5 tmp27 = tmp24 + tmp26 tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x2, tmp28, xmask) tl.store(out_ptr1 + x2, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp8 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = -1e+30 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(in_out_ptr0 + x4, tmp12, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, 1, 4), (4, 4, 1)) assert_size_stride(primals_9, (16, 2), (2, 1)) assert_size_stride(primals_10, (16, 2), (2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), ( 16, 1, 4), 0), out=buf3) buf4 = buf2 del buf2 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_rsub_1[grid(64)](primals_8, buf0, buf1, buf3, primals_6, primals_7, buf4, 
buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf5 del buf5 triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_5[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf8 del buf8 extern_kernels.bmm(buf6, primals_2, out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf11, primals_1, out=buf12) buf13 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_6[grid(256)](primals_1, buf10, buf12, buf13, 256, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0), primals_9, out=buf14) buf15 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 16), (16, 1), 0), primals_10, out=buf15) buf16 = buf10 del buf10 extern_kernels.bmm(reinterpret_tensor(buf14, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf15, (4, 2, 4), (8, 1, 2), 0), out=buf16) buf17 = reinterpret_tensor(buf1, (4, 1, 4), (4, 16, 1), 0) del buf1 buf18 = reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0) del buf0 triton_poi_fused__softmax_add_mul_rsub_7[grid(16)](primals_7, buf16, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = buf16 del buf16 triton_poi_fused__softmax_add_mul_rsub_8[grid(64)](buf19, primals_7, buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 del buf18 buf20 = buf11 del buf11 extern_kernels.bmm(buf19, buf12, out=buf20) return (buf20, primals_1, primals_2, primals_7, primals_8, buf6, buf9, buf19, reinterpret_tensor(buf12, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf14, (4, 2, 4), (8, 1, 2), 0), reinterpret_tensor(buf15, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf13, (16, 16), (1, 16), 0), reinterpret_tensor (primals_10, (2, 16), (1, 2), 0), reinterpret_tensor(primals_9, (2, 16), (1, 2), 0)) def masked_softmax(logits, mask, dim=-1, log_softmax=False): """Take the softmax of `logits` over given dimension, and set entries to 0 wherever `mask` is 0. Args: logits (torch.Tensor): Inputs to the softmax function. mask (torch.Tensor): Same shape as `logits`, with 0 indicating positions that should be assigned 0 probability in the output. dim (int): Dimension over which to take softmax. log_softmax (bool): Take log-softmax rather than regular softmax. E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax. Returns: probs (torch.Tensor): Result of taking masked softmax over the logits. """ mask = mask.type(torch.float32) masked_logits = mask * logits + (1 - mask) * -1e+30 softmax_fn = F.log_softmax if log_softmax else F.softmax probs = softmax_fn(masked_logits, dim) return probs class SelfAttentionNew(nn.Module): """Bidirectional attention originally used by BiDAF. Bidirectional attention computes attention in two directions: The context attends to the query and the query attends to the context. The output of this layer is the concatenation of [context, c2q_attention, context * c2q_attention, context * q2c_attention]. 
This concatenation allows the attention vector at each timestep, along with the embeddings from previous layers, to flow through the attention layer to the modeling layer. The output has shape (batch_size, context_len, 8 * hidden_size). Args: hidden_size (int): Size of hidden activations. drop_prob (float): Probability of zero-ing out activations. """ def __init__(self, hidden_size, drop_prob=0.1): super().__init__() self.drop_prob = drop_prob self.c_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.q_weight = nn.Parameter(torch.zeros(hidden_size, 1)) self.p_weight1 = nn.Parameter(torch.zeros(4 * hidden_size, int(np. sqrt(hidden_size)))) self.p_weight2 = nn.Parameter(torch.zeros(4 * hidden_size, int(np. sqrt(hidden_size)))) self.cq_weight = nn.Parameter(torch.zeros(1, 1, hidden_size)) for weight in (self.c_weight, self.q_weight, self.cq_weight): nn.init.xavier_uniform_(weight) for weight in (self.p_weight1, self.p_weight2): nn.init.xavier_uniform_(weight) self.bias = nn.Parameter(torch.zeros(1)) def get_similarity_matrix(self, c, q): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ c_len, q_len = c.size(1), q.size(1) c = F.dropout(c, self.drop_prob, self.training) q = F.dropout(q, self.drop_prob, self.training) s0 = torch.matmul(c, self.c_weight).expand([-1, -1, q_len]) s1 = torch.matmul(q, self.q_weight).transpose(1, 2).expand([-1, c_len, -1]) s2 = torch.matmul(c * self.cq_weight, q.transpose(1, 2)) s = s0 + s1 + s2 + self.bias return s def get_self_similarity_matrix(self, b): """Get the "similarity matrix" between context and query (using the terminology of the BiDAF paper). A naive implementation as described in BiDAF would concatenate the three vectors then project the result with a single weight matrix. This method is a more memory-efficient implementation of the same operation. See Also: Equation 1 in https://arxiv.org/abs/1611.01603 """ b.size(1) b = F.dropout(b, self.drop_prob, self.training) s0 = torch.matmul(b, self.p_weight1) s1 = torch.matmul(b, self.p_weight2) s = torch.matmul(s0, s1.transpose(1, 2)) return s def forward(self, input_0, input_1, input_2, input_3): primals_3 = self.c_weight primals_4 = self.q_weight primals_9 = self.p_weight1 primals_10 = self.p_weight2 primals_5 = self.cq_weight primals_6 = self.bias primals_1 = input_0 primals_2 = input_1 primals_7 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
mayankiitg/cs224n
SelfAttention
false
4015
[ "MIT" ]
0
c67b7904101c8f19a5a231e4fe521e764470d41b
https://github.com/mayankiitg/cs224n/tree/c67b7904101c8f19a5a231e4fe521e764470d41b
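A parity sketch for the SelfAttention record, not in the dataset: the compiled trace contains no dropout kernels, so the comparison only makes sense in eval() mode, where F.dropout is the identity. The shapes mirror get_inputs() (random float-valued masks included); the seed and tolerance are assumptions.

import torch

# Hypothetical eval-mode parity check with shared Xavier-initialized weights.
torch.manual_seed(0)
eager = SelfAttention(hidden_size=4).cuda().eval()
compiled = SelfAttentionNew(hidden_size=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
c = torch.rand(4, 4, 4, device='cuda')
q = torch.rand(4, 4, 4, device='cuda')
c_mask = torch.rand(4, 4, 1, device='cuda')
q_mask = torch.rand(4, 1, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(c, q, c_mask, q_mask),
                          compiled(c, q, c_mask, q_mask), atol=1e-4)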
WDLoss
import torch from torch import nn class WDLoss(nn.Module): def __init__(self, _lambda): super(WDLoss, self).__init__() self._lambda = _lambda def forward(self, t_x, t_y, t_z): return -(torch.mean(t_x) - torch.mean(t_y) - self._lambda * torch. mean((torch.norm(t_z, dim=1) - 1).pow(2))) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'_lambda': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_neg_pow_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp18 = tl.load(in_out_ptr0 + 0) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, 1]) tmp22 = tl.load(in_ptr1 + 0) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp20 = 256.0 tmp21 = tmp19 / tmp20 tmp24 = tmp23 / tmp20 tmp25 = tmp21 - tmp24 tmp26 = 64.0 tmp27 = tmp17 / tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tmp25 - tmp29 tmp31 = -tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(1)](arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mean_0[grid(1)](arg1_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg1_1 buf3 = buf0 del buf0 triton_per_fused_linalg_vector_norm_mean_mul_neg_pow_sub_1[grid(1)]( buf3, arg2_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 del buf1 return buf3, class WDLossNew(nn.Module): def __init__(self, _lambda): super(WDLossNew, self).__init__() self._lambda = _lambda def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
mirmohammad/IFT6135-TP3
WDLoss
false
4,016
[ "MIT" ]
0
70453b4ea695313837ab88243b0206552eb50632
https://github.com/mirmohammad/IFT6135-TP3/tree/70453b4ea695313837ab88243b0206552eb50632
Linear_fil
import torch import torch.nn as nn class Linear_fil(nn.Module): def __init__(self, input_dim, hidden_dim): super(Linear_fil, self).__init__() self.lin_1 = nn.Linear(input_dim, hidden_dim) self.act = nn.ReLU() self.lin_2 = nn.Linear(hidden_dim, 1) self.sigmoid = nn.Sigmoid() def forward(self, x): x = self.lin_1(x) x = self.act(x) x = self.lin_2(x) x = self.sigmoid(x).squeeze() return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_sigmoid_sigmoid_backward_1[grid(64)](buf3, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5 class Linear_filNew(nn.Module): def __init__(self, input_dim, hidden_dim): super(Linear_filNew, self).__init__() self.lin_1 = nn.Linear(input_dim, hidden_dim) self.act = nn.ReLU() self.lin_2 = nn.Linear(hidden_dim, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.lin_1.weight primals_2 = self.lin_1.bias primals_4 = self.lin_2.weight primals_5 = self.lin_2.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
mityanony404/TopGraph
Linear_fil
false
4,017
[ "MIT" ]
0
23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
https://github.com/mityanony404/TopGraph/tree/23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
SimpleStackModel
import torch import torch.onnx import torch.nn class SimpleStackModel(torch.nn.Module): def __init__(self): super(SimpleStackModel, self).__init__() def forward(self, a, b): c = torch.stack((a, b), 0) d = torch.stack((c, c), 1) return torch.stack((d, d), 2) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 4 x3 = xindex // 1024 x0 = xindex % 64 x4 = xindex tmp0 = x1 + 4 * x3 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * (x1 + 4 * x3)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-4 + x1 + 4 * x3)), tmp6, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((2, 2, 2, 4, 4, 4, 4), (1024, 512, 256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(2048)](arg0_1, arg1_1, buf0, 2048, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleStackModelNew(torch.nn.Module): def __init__(self): super(SimpleStackModelNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
mlupon/glow
SimpleStackModel
false
4,018
[ "Apache-2.0" ]
0
aedaa7b98617f1a2db651608e7f7c916a7d2c766
https://github.com/mlupon/glow/tree/aedaa7b98617f1a2db651608e7f7c916a7d2c766
Net
import torch import torch.nn as nn class Net(nn.Module): def __init__(self, input_dim, output_dim, hidden_dim=None, barcode_dim=0): super().__init__() if hidden_dim is None: hidden_dim = [250, 100] self.fc1 = nn.Linear(input_dim, hidden_dim[0]) self.act = nn.ReLU() self.fc2 = nn.Linear(hidden_dim[0], hidden_dim[1]) self.fc3 = nn.Linear(hidden_dim[1], output_dim) self.barcode_dim = barcode_dim def forward(self, x, bar): x = x.view((x.size(0), -1)) bar = bar.view((bar.size(0), -1)) if self.barcode_dim > 0: x = torch.cat((x, bar), dim=1) out = self.fc1(x) out = self.act(out) out = self.fc2(out) out = self.act(out) out = self.fc3(out) return out def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 250 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (250, 4), (4, 1)) assert_size_stride(primals_4, (250,), (1,)) assert_size_stride(primals_5, (100, 250), (250, 1)) assert_size_stride(primals_6, (100,), (1,)) assert_size_stride(primals_7, (4, 100), (100, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 250), (250, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 250), (1, 4), 0), out=buf0) del primals_3 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1000)](buf1, primals_4, 1000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (250, 100), ( 1, 250), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(400)](buf3, primals_6, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_8 return buf4, primals_1, buf1, buf3, primals_7, primals_5 class NetNew(nn.Module): def __init__(self, input_dim, output_dim, hidden_dim=None, barcode_dim=0): super().__init__() if hidden_dim is None: hidden_dim = [250, 100] self.fc1 = nn.Linear(input_dim, hidden_dim[0]) self.act = nn.ReLU() self.fc2 = nn.Linear(hidden_dim[0], hidden_dim[1]) self.fc3 = nn.Linear(hidden_dim[1], output_dim) self.barcode_dim = barcode_dim def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8]) return output[0]
mityanony404/TopGraph
Net
false
4,019
[ "MIT" ]
0
23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
https://github.com/mityanony404/TopGraph/tree/23595ca5d3dfcd5bc5ebb771800e3fbe9a0d5eed
SimpleSliceModel
import torch import torch.onnx import torch.nn class SimpleSliceModel(torch.nn.Module): def __init__(self): super(SimpleSliceModel, self).__init__() def forward(self, tensor): other = (tensor + tensor)[1:] return other[0][1:] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (3, 4, 4), (16, 4, 1), 80), class SimpleSliceModelNew(torch.nn.Module): def __init__(self): super(SimpleSliceModelNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
mlupon/glow
SimpleSliceModel
false
4,020
[ "Apache-2.0" ]
0
aedaa7b98617f1a2db651608e7f7c916a7d2c766
https://github.com/mlupon/glow/tree/aedaa7b98617f1a2db651608e7f7c916a7d2c766
CAM_Module
from torch.nn import Module import torch import torch.utils.data import torch from torch.nn import Parameter from torch.nn import Softmax class CAM_Module(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_Module, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, x): """ inputs : x : input feature maps( B X C X H X W) returns : out : attention value + input feature attention: B X C X C """ m_batchsize, C, height, width = x.size() proj_query = x.view(m_batchsize, C, -1) proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy ) - energy attention = self.softmax(energy_new) proj_value = x.view(m_batchsize, C, -1) out = torch.bmm(attention, proj_value) out = out.view(m_batchsize, C, height, width) out = self.gamma * out + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch from torch.nn import Parameter from torch.nn import Softmax assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_2, buf4, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, buf4 class CAM_ModuleNew(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_ModuleNew, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, input_0): primals_2 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
mlcb-jlu/wsMedSeg
CAM_Module
false
4,021
[ "MIT" ]
0
63bd1fd28583f11444f292f4b961870ea1b12635
https://github.com/mlcb-jlu/wsMedSeg/tree/63bd1fd28583f11444f292f4b961870ea1b12635
Homoscedastic
import torch class Homoscedastic(torch.nn.Module): """https://arxiv.org/abs/1705.07115""" def __init__(self, n_tasks, reduction='sum'): super(Homoscedastic, self).__init__() self.n_tasks = n_tasks self.log_vars = torch.nn.Parameter(torch.zeros(self.n_tasks)) self.reduction = reduction def forward(self, losses): device = losses.device stds = (torch.exp(self.log_vars) ** (1 / 2)).to(device) coeffs = 1 / stds ** 2 multi_task_losses = coeffs * losses + torch.log(stds) if self.reduction == 'sum': multi_task_losses = multi_task_losses.sum() if self.reduction == 'mean': multi_task_losses = multi_task_losses.mean() return multi_task_losses def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_tasks': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_log_mul_pow_reciprocal_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 4 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + r2, None) tmp1 = tl_math.exp(tmp0) tmp2 = libdevice.sqrt(tmp1) tmp3 = tmp2 * tmp2 tmp4 = tl.full([1], 1, tl.int32) tmp5 = tmp4 / tmp3 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp7 * tmp8 tmp10 = tl_math.log(tmp2) tmp11 = tmp9 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_exp_log_mul_pow_reciprocal_sum_0[grid(1)]( primals_2, primals_1, buf0, 1, 256, num_warps=2, num_stages=1) return buf0, primals_1, primals_2 class HomoscedasticNew(torch.nn.Module): """https://arxiv.org/abs/1705.07115""" def __init__(self, n_tasks, reduction='sum'): super(HomoscedasticNew, self).__init__() self.n_tasks = n_tasks self.log_vars = torch.nn.Parameter(torch.zeros(self.n_tasks)) self.reduction = reduction def forward(self, input_0): primals_2 = self.log_vars primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
moelmahdy/JRS-MTL
Homoscedastic
false
4,022
[ "BSD-3-Clause" ]
0
5abec9e06dad2721929738b1734350ed847e9d5a
https://github.com/moelmahdy/JRS-MTL/tree/5abec9e06dad2721929738b1734350ed847e9d5a
Model
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_dim): super(Model, self).__init__() self.layer1 = nn.Linear(input_dim, 50) self.layer2 = nn.Linear(50, 20) self.layer3 = nn.Linear(20, 1) def forward(self, x): x = F.relu(self.layer1(x)) x = F.relu(self.layer2(x)) x = self.layer3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 20 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (50, 4), (4, 1)) assert_size_stride(primals_2, (50,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (20, 50), (50, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (1, 20), (20, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1, primals_2, buf7, 3200, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(primals_4, (50, 20), (1, 50), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 20), (320, 80, 20, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(1280)](buf3, primals_5, buf6, 1280, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 20), (20, 1), 0), reinterpret_tensor(primals_6, (20, 1), (1, 20), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), 
reinterpret_tensor( buf3, (64, 20), (20, 1), 0), primals_6, buf6, primals_4, buf7 class ModelNew(nn.Module): def __init__(self, input_dim): super(ModelNew, self).__init__() self.layer1 = nn.Linear(input_dim, 50) self.layer2 = nn.Linear(50, 20) self.layer3 = nn.Linear(20, 1) def forward(self, input_0): primals_1 = self.layer1.weight primals_2 = self.layer1.bias primals_4 = self.layer2.weight primals_5 = self.layer2.bias primals_6 = self.layer3.weight primals_7 = self.layer3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mlsquare/kitchen
Model
false
4,023
[ "MIT" ]
0
3664fd289f7ea5c20cdd55e96ebe29b77effa062
https://github.com/mlsquare/kitchen/tree/3664fd289f7ea5c20cdd55e96ebe29b77effa062
CDAE
import torch from torch import nn from torch.autograd import Variable def add_gaussian_noise(x, std): return x + Variable(x.data.new(x.size()).normal_(0, std)) class CDAE(nn.Module): """ Convolutional denoising autoencoder layer for stacked autoencoders. Args: in_channels: the number of channels in the input. out_channels: the number of channels in the output. stride: stride of the convolutional layers. """ def __init__(self, in_channels, out_channels, kernel_size, stride=2, noise_std=0.1, **kwargs): super(CDAE, self).__init__(**kwargs) self.std = noise_std self.encoder = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=0) self.decoder = nn.ConvTranspose2d(out_channels, in_channels, kernel_size, stride=stride, padding=0) def forward(self, x): if self.training: x = add_gaussian_noise(x, self.std) emb = torch.relu(self.encoder(x)) return emb, torch.relu(self.decoder(emb)) def reconstruct(self, emb): return self.decoder(emb) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(256)](buf3, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf1, buf3, primals_1, primals_3, primals_4, buf1, buf4 def add_gaussian_noise(x, std): return x + Variable(x.data.new(x.size()).normal_(0, std)) class CDAENew(nn.Module): """ Convolutional denoising autoencoder layer for stacked autoencoders. Args: in_channels: the number of channels in the input. out_channels: the number of channels in the output. stride: stride of the convolutional layers. 
""" def __init__(self, in_channels, out_channels, kernel_size, stride=2, noise_std=0.1, **kwargs): super(CDAENew, self).__init__(**kwargs) self.std = noise_std self.encoder = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=0) self.decoder = nn.ConvTranspose2d(out_channels, in_channels, kernel_size, stride=stride, padding=0) def reconstruct(self, emb): return self.decoder(emb) def forward(self, input_0): primals_1 = self.encoder.weight primals_2 = self.encoder.bias primals_3 = self.decoder.weight primals_5 = self.decoder.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
mmcenta/eye-disease-recognition
CDAE
false
4,025
[ "MIT" ]
0
52e1dedbce27514b605b9f8ad976d6042b7e2f14
https://github.com/mmcenta/eye-disease-recognition/tree/52e1dedbce27514b605b9f8ad976d6042b7e2f14
MLP
from torch.nn import Module import torch from torch.nn import Linear from torch.nn import Sigmoid from torch.nn import ReLU from torch.nn.init import kaiming_normal from torch.nn.init import xavier_normal class MLP(Module): def __init__(self, n_inputs): super(MLP, self).__init__() self.hidden1 = Linear(n_inputs, 10) kaiming_normal(self.hidden1.weight, nonlinearity='relu') self.act1 = ReLU() self.hidden2 = Linear(10, 8) kaiming_normal(self.hidden2.weight, nonlinearity='relu') self.act2 = ReLU() self.hidden3 = Linear(8, 1) xavier_normal(self.hidden3.weight) self.act3 = Sigmoid() def forward(self, X): X = self.hidden1(X) X = self.act1(X) X = self.hidden2(X) X = self.act2(X) X = self.hidden3(X) X = self.act3(X) return X def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_inputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn import Linear from torch.nn import Sigmoid from torch.nn import ReLU from torch.nn.init import kaiming_normal from torch.nn.init import xavier_normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 10), (10, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (1, 8), (8, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf7, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 8), (1, 10), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(512)](buf3, primals_5, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 1), (1, 8), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor( buf3, (64, 8), (8, 1), 0), buf5, primals_6, buf6, primals_4, buf7 class MLPNew(Module): def __init__(self, n_inputs): super(MLPNew, self).__init__() self.hidden1 = Linear(n_inputs, 10) kaiming_normal(self.hidden1.weight, nonlinearity='relu') self.act1 = ReLU() self.hidden2 = Linear(10, 8) kaiming_normal(self.hidden2.weight, nonlinearity='relu') self.act2 = ReLU() self.hidden3 = Linear(8, 1) xavier_normal(self.hidden3.weight) self.act3 = Sigmoid() def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.hidden3.weight primals_7 = self.hidden3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mmg63/Pytorch-Code-for-Binary-classification
MLP
false
4,026
[ "MIT" ]
0
773e909fcba41cdaba48c96e35da68acaf64c513
https://github.com/mmg63/Pytorch-Code-for-Binary-classification/tree/773e909fcba41cdaba48c96e35da68acaf64c513
ConvNeuralNetwork
import torch import torch.nn as nn class ConvNeuralNetwork(nn.Module): def __init__(self, num_classes=3): super(ConvNeuralNetwork, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size= 3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size =3, stride=1, padding=1) self.conv3 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size =3, stride=1, padding=1) self.conv4 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size =3, stride=1, padding=1) self.relu = nn.ReLU() self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(in_features=32 * 32 * 24, out_features=num_classes ) self.dropout = nn.Dropout2d(p=0.5) def forward(self, input): output = self.conv1(input) output = self.relu(output) output = self.conv2(output) output = self.relu(output) output = self.pool(output) output = self.conv3(output) output = self.relu(output) output = self.conv4(output) output = self.relu(output) output = output.view(-1, 32 * 32 * 24) output = self.fc1(output) return output def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 12 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 24 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 24 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (12, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) 
assert_size_stride(primals_4, (12, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (24, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_7, (24,), (1,)) assert_size_stride(primals_8, (24, 24, 3, 3), (216, 9, 3, 1)) assert_size_stride(primals_9, (24,), (1,)) assert_size_stride(primals_10, (3, 24576), (24576, 1)) assert_size_stride(primals_11, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(196608)](buf1, primals_2, 196608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(196608)](buf3, primals_5, 196608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 12, 32, 32), (12288, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 12, 32, 32), (12288, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(49152)](buf3, buf4, buf5, 49152, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(98304)](buf7, primals_7, 98304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf9 = buf8 del buf8 buf11 = empty_strided_cuda((4, 24, 32, 32), (24576, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(98304)]( buf9, primals_9, buf11, 98304, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf9, (4, 24576 ), (24576, 1), 0), reinterpret_tensor(primals_10, (24576, 3), ( 1, 24576), 0), alpha=1, beta=1, out=buf10) del primals_11 return (buf10, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf4, buf5, buf7, reinterpret_tensor(buf9, (4, 24576), (24576, 1), 0), primals_10, buf11) class ConvNeuralNetworkNew(nn.Module): def __init__(self, num_classes=3): super(ConvNeuralNetworkNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size= 3, stride=1, padding=1) self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size =3, stride=1, padding=1) self.conv3 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size =3, stride=1, padding=1) self.conv4 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size =3, stride=1, padding=1) self.relu = nn.ReLU() self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(in_features=32 * 32 * 24, out_features=num_classes ) self.dropout = nn.Dropout2d(p=0.5) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = 
self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc1.weight primals_11 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
mngaonkar/pytorch-image-classifier
ConvNeuralNetwork
false
4,027
[ "MIT" ]
0
f10b4363dc62c2fbbb5fbfbc56a3849da623fc80
https://github.com/mngaonkar/pytorch-image-classifier/tree/f10b4363dc62c2fbbb5fbfbc56a3849da623fc80
AffineTransform
import torch from torch import nn class FC(nn.Module): def __init__(self, n_dim_in, n_dim_out, equal_lr=True): super().__init__() norm_const = n_dim_in ** -0.5 scale_init = 1 if equal_lr else norm_const self.scale_forward = norm_const if equal_lr else 1 self.weight = nn.Parameter(scale_init * torch.randn(n_dim_out, n_dim_in)) self.bias = nn.Parameter(torch.zeros(n_dim_out)) def forward(self, x): return nn.functional.linear(x, self.scale_forward * self.weight, bias=self.bias) class AffineTransform(nn.Module): def __init__(self, n_dim_w, n_feature_maps, equal_lr): super().__init__() self.fc = FC(n_dim_w, n_feature_maps, equal_lr=equal_lr) nn.init.ones_(self.fc.bias) def forward(self, w): return self.fc(w) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_dim_w': 4, 'n_feature_maps': 4, 'equal_lr': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class FC(nn.Module): def __init__(self, n_dim_in, n_dim_out, equal_lr=True): super().__init__() norm_const = n_dim_in ** -0.5 scale_init = 1 if equal_lr else norm_const self.scale_forward = norm_const if equal_lr else 1 self.weight = nn.Parameter(scale_init * torch.randn(n_dim_out, n_dim_in)) self.bias = nn.Parameter(torch.zeros(n_dim_out)) def forward(self, x): return nn.functional.linear(x, self.scale_forward * self.weight, bias=self.bias) class AffineTransformNew(nn.Module): def __init__(self, n_dim_w, n_feature_maps, equal_lr): super().__init__() self.fc = FC(n_dim_w, n_feature_maps, equal_lr=equal_lr) nn.init.ones_(self.fc.bias) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
moritztng/stylegan2-pytorch
AffineTransform
false
4,028
[ "MIT" ]
0
8827eae2e76c54b7406b34b2d49563ae53b04001
https://github.com/moritztng/stylegan2-pytorch/tree/8827eae2e76c54b7406b34b2d49563ae53b04001
NeuralNetwork
import torch import torch.nn as nn import torch.nn.functional as F class NeuralNetwork(nn.Module): def __init__(self, num_classes=3): super(NeuralNetwork, self).__init__() self.fc1 = nn.Linear(64 * 64 * 3, 84) self.fc2 = nn.Linear(84, 50) self.fc3 = nn.Linear(50, num_classes) def forward(self, x): x = x.view(-1, 64 * 64 * 3) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 12288])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 50 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 12288), (12288, 1)) assert_size_stride(primals_2, (84, 12288), (12288, 1)) assert_size_stride(primals_3, (84,), (1,)) assert_size_stride(primals_4, (50, 84), (84, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (3, 50), (50, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (12288, 84), (1, 12288), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(336)](buf1, primals_3, 336, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (84, 50), (1, 84), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(200)](buf3, primals_5, 200, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 3), (1, 50), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, primals_1, buf1, buf3, primals_6, primals_4 class NeuralNetworkNew(nn.Module): def __init__(self, num_classes=3): super(NeuralNetworkNew, self).__init__() self.fc1 = nn.Linear(64 * 64 * 3, 84) self.fc2 = nn.Linear(84, 50) self.fc3 = nn.Linear(50, num_classes) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
mngaonkar/pytorch-image-classifier
NeuralNetwork
false
4,029
[ "MIT" ]
0
f10b4363dc62c2fbbb5fbfbc56a3849da623fc80
https://github.com/mngaonkar/pytorch-image-classifier/tree/f10b4363dc62c2fbbb5fbfbc56a3849da623fc80
Conv
import torch from torch import nn class Conv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, equal_lr=True): super().__init__() self.stride = stride self.padding = padding self.dilation = dilation norm_const = (in_channels * kernel_size ** 2) ** -0.5 scale_init = 1 if equal_lr else norm_const self.scale_forward = norm_const if equal_lr else 1 self.weight = nn.Parameter(scale_init * torch.randn(out_channels, in_channels, kernel_size, kernel_size)) self.bias = nn.Parameter(torch.zeros(out_channels)) def forward(self, x, y_s=None, demod=False): weight = self.scale_forward * self.weight bias = self.bias groups = 1 batch_size = x.size(0) if y_s is not None: weight = y_s.view(y_s.size(0), 1, y_s.size(1), 1, 1 ) * weight.unsqueeze(0) if demod: x_s = ((weight ** 2).sum(dim=(2, 3, 4)) + 1e-08) ** 0.5 weight = weight / x_s.view(*x_s.size(), 1, 1, 1) weight = weight.view(-1, *weight.size()[2:]) bias = bias.expand(batch_size, -1).reshape(-1) groups = batch_size x = x.reshape(1, -1, *x.size()[2:]) x = nn.functional.conv2d(x, weight, bias=bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=groups) return x.view(batch_size, -1, *x.size()[2:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_view_1[grid(16)](buf2, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf2, primals_3, buf0 class ConvNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, equal_lr=True): super().__init__() self.stride = stride self.padding = padding self.dilation = dilation norm_const = (in_channels * kernel_size ** 2) ** -0.5 scale_init = 1 if equal_lr else norm_const self.scale_forward = norm_const if equal_lr else 1 self.weight = nn.Parameter(scale_init * torch.randn(out_channels, in_channels, kernel_size, kernel_size)) self.bias = nn.Parameter(torch.zeros(out_channels)) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
moritztng/stylegan2-pytorch
Conv
false
4,030
[ "MIT" ]
0
8827eae2e76c54b7406b34b2d49563ae53b04001
https://github.com/moritztng/stylegan2-pytorch/tree/8827eae2e76c54b7406b34b2d49563ae53b04001
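For orientation: the style-modulated branch of Conv.forward relies on folding the batch into the channel dimension so that one conv2d call applies a different (modulated, optionally demodulated) kernel per sample. The sketch below is not part of the source repo and uses made-up shapes; it checks that grouped-conv trick against a naive per-sample loop, reusing the module's 1e-08 demodulation epsilon.

import torch
import torch.nn.functional as F

# Illustrative check (hypothetical, not from the repo): the groups=batch
# trick used by Conv.forward must match a per-sample convolution loop.
torch.manual_seed(0)
n, c_in, c_out, k, size = 2, 4, 4, 3, 8
x = torch.randn(n, c_in, size, size)
w = torch.randn(c_out, c_in, k, k)
y_s = torch.randn(n, c_in)

# Modulate: one weight tensor per sample, (n, c_out, c_in, k, k).
w_mod = y_s.view(n, 1, c_in, 1, 1) * w.unsqueeze(0)
# Demodulate with the same epsilon as the module above.
d = ((w_mod ** 2).sum(dim=(2, 3, 4)) + 1e-08) ** 0.5
w_mod = w_mod / d.view(n, c_out, 1, 1, 1)

# Grouped path: batch folded into channels, one conv2d call.
out_grouped = F.conv2d(x.reshape(1, n * c_in, size, size),
    w_mod.reshape(n * c_out, c_in, k, k),
    groups=n).view(n, c_out, size - k + 1, size - k + 1)
# Naive path: one conv2d call per sample.
out_naive = torch.stack([F.conv2d(x[i:i + 1], w_mod[i])[0] for i in range(n)])
assert torch.allclose(out_grouped, out_naive, atol=1e-5)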
_Classifier
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class _Classifier(nn.Module):

    def __init__(self, z_c_dim):
        super(_Classifier, self).__init__()
        self.fc1 = nn.Linear(z_c_dim, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, z_c):
        h = F.relu(self.fc1(z_c))
        h = self.fc2(h)
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'z_c_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias add + ReLU; also emits the (x <= 0) mask reused by the
    # ReLU backward pass (hence "threshold_backward" in the kernel name).
    xnumel = 3200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 50
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (50, 4), (4, 1))
    assert_size_stride(primals_2, (50,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (10, 50), (50, 1))
    assert_size_stride(primals_5, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
            primals_2, buf3, 3200, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 50),
            (50, 1), 0), reinterpret_tensor(primals_4, (50, 10), (1, 50),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 50), (50, 1), 0), primals_4, buf3)


class _ClassifierNew(nn.Module):

    def __init__(self, z_c_dim):
        super(_ClassifierNew, self).__init__()
        self.fc1 = nn.Linear(z_c_dim, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
mori97/revae
_Classifier
false
4,031
[ "MIT" ]
0
465009076a9be78e8ddb9021a0699b32fc695f30
https://github.com/mori97/revae/tree/465009076a9be78e8ddb9021a0699b32fc695f30
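One way to sanity-check the generated graph is to load the eager module's weights into _ClassifierNew and compare outputs on the sample inputs. A minimal sketch, assuming a CUDA device (the call() path launches Triton kernels); the eager/fused names are illustrative, not from the source:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = _Classifier(z_c_dim=4).cuda()
    fused = _ClassifierNew(z_c_dim=4).cuda()
    # Both classes register fc1/fc2, so the state dicts are interchangeable.
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # The fused forward returns only output[0]; compare against eager.
    assert torch.allclose(eager(x), fused(x), atol=1e-5)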
Distance
import torch
import torch.nn as nn


class Distance(nn.Module):

    def __init__(self):
        super(Distance, self).__init__()

    def forward(self, s, t):
        # Pairwise squared Euclidean distances; the final .T yields a
        # (q, n) matrix with dist[j, i] = ||t_j - s_i||^2.
        n, q = s.shape[0], t.shape[0]
        dist = (t.unsqueeze(0).expand(n, q, -1) - s.unsqueeze(1).expand(n,
            q, -1)).pow(2).sum(dim=2).T
        return dist


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_sub_sum_0[grid(16)](arg1_1, arg0_1, buf0, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return reinterpret_tensor(buf0, (4, 4), (1, 4), 0),


class DistanceNew(nn.Module):

    def __init__(self):
        super(DistanceNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
msc5/ml-tools
Distance
false
4,032
[ "Apache-2.0" ]
0
75ca504bdc0495e8a929ad73501b7de692b3089a
https://github.com/msc5/ml-tools/tree/75ca504bdc0495e8a929ad73501b7de692b3089a
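Note that the fused kernel writes the (n, q) squared-distance matrix once and realizes the final transpose for free via a stride swap (reinterpret_tensor(buf0, (4, 4), (1, 4), 0)), mirroring the .T in the eager forward. On CPU, the eager module can be cross-checked against torch.cdist; a minimal sketch, not from the source repo:

import torch

s, t = torch.rand(4, 4), torch.rand(4, 4)
dist = Distance()(s, t)            # (q, n): dist[j, i] = ||t_j - s_i||^2
ref = torch.cdist(t, s, p=2) ** 2  # same matrix via the built-in
assert torch.allclose(dist, ref, atol=1e-5)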