Dataset schema (for string and list columns, min/max are lengths; for int64 columns, min/max are value ranges):

column                         type     min      max
entry_point                    string   1        65
original_triton_python_code    string   208      619k
optimised_triton_code          string   1.15k    275k
repo_name                      string   7        115
module_name                    string   1        65
synthetic                      bool     1 class
uuid                           int64    0        18.5k
licenses                       list     1        6
stars                          int64    0        19.8k
sha                            string   40       40
repo_link                      string   72       180
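Each record pairs a reference PyTorch module (with its get_inputs/get_init_inputs helpers) with the TorchInductor/Triton code generated for it, plus provenance metadata. As a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, not the real path of this dataset.

# Minimal sketch (assumption: the dataset is published on the Hugging Face Hub;
# "org/pytorch-to-triton" is a hypothetical placeholder identifier).
from datasets import load_dataset

ds = load_dataset("org/pytorch-to-triton", split="train")

row = ds[0]
print(row["entry_point"], "from", row["repo_name"], "stars:", row["stars"])
print(row["repo_link"])                          # pinned to the commit in row["sha"]
print(row["original_triton_python_code"][:300])  # reference PyTorch nn.Module + get_inputs()
print(row["optimised_triton_code"][:300])        # TorchInductor-generated Triton kernels + call()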
entry_point: ZeroConv2d

original_triton_python_code:

import torch
from torch import nn
from torch.nn import functional as F


class ZeroConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, padding=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
        self.conv.weight.data.zero_()
        self.conv.bias.data.zero_()
        self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))

    def forward(self, input):
        out = F.pad(input, [1, 1, 1, 1], value=1)
        out = self.conv(out)
        out = out * torch.exp(self.scale * 3)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=1.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_exp_mul_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 3.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 * tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_exp_mul_1[grid(256)](buf2, primals_3, primals_4, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf3, primals_2, primals_4, buf0, buf2 class ZeroConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, padding=1): super().__init__() self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0) self.conv.weight.data.zero_() self.conv.bias.data.zero_() self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) def forward(self, input_0): primals_4 = self.scale primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: hologerry/glow-pytorch-1
module_name: ZeroConv2d
synthetic: false
uuid: 3,616
licenses: [ "MIT" ]
stars: 0
sha: 9d3f95f4ff7f0a1361796a9b2554e3c229aad9b7
repo_link: https://github.com/hologerry/glow-pytorch-1/tree/9d3f95f4ff7f0a1361796a9b2554e3c229aad9b7
entry_point: SmoothnessLoss

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class SmoothnessLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, pred_label):
        _n, _c, w, h = pred_label.size()
        loss = torch.tensor(0.0, device=pred_label.device)
        for i in range(w - 1):
            for j in range(h - 1):
                loss += F.l1_loss(pred_label[:, :, i, j],
                    pred_label[:, :, i + 1, j], reduction='sum')
                loss += F.l1_loss(pred_label[:, :, i, j],
                    pred_label[:, :, i, j + 1], reduction='sum')
        loss /= w * h
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 16 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + 16 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 16 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (5 + 16 * r0), None, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (2 + 16 * r0), None, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr0 + (6 + 16 * r0), None, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr0 + (3 + 16 * r0), None, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr0 + (8 + 16 * r0), None, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr0 + (9 + 16 * r0), None, eviction_policy='evict_last' ) tmp59 = tl.load(in_ptr0 + (10 + 16 * r0), None, eviction_policy= 'evict_last') tmp65 = tl.load(in_ptr0 + (7 + 16 * r0), None, eviction_policy='evict_last' ) tmp71 = tl.load(in_ptr0 + (12 + 16 * r0), None, eviction_policy= 'evict_last') tmp82 = tl.load(in_ptr0 + (13 + 16 * r0), None, eviction_policy= 'evict_last') tmp93 = tl.load(in_ptr0 + (14 + 16 * r0), None, eviction_policy= 'evict_last') tmp99 = tl.load(in_ptr0 + (11 + 16 * r0), None, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp8 = tmp0 - tmp7 tmp9 = tl_math.abs(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp14 = tmp7 - tmp13 tmp15 = tl_math.abs(tmp14) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp20 = tmp7 - tmp19 tmp21 = tl_math.abs(tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp26 = tmp19 - tmp25 tmp27 = tl_math.abs(tmp26) tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp32 = tmp19 - tmp31 tmp33 = tl_math.abs(tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp38 = tmp1 - tmp37 tmp39 = tl_math.abs(tmp38) tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK]) tmp42 = tl.sum(tmp40, 1)[:, None] tmp43 = tmp1 - tmp13 tmp44 = tl_math.abs(tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp49 = tmp13 - tmp48 tmp50 = tl_math.abs(tmp49) tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK]) tmp53 = tl.sum(tmp51, 1)[:, None] tmp54 = tmp13 - tmp25 tmp55 = tl_math.abs(tmp54) tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.sum(tmp56, 1)[:, None] tmp60 = tmp25 - tmp59 tmp61 = tl_math.abs(tmp60) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = tl.sum(tmp62, 1)[:, None] tmp66 = tmp25 - tmp65 tmp67 = tl_math.abs(tmp66) tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK]) tmp70 = tl.sum(tmp68, 1)[:, None] tmp72 = tmp37 - tmp71 tmp73 = tl_math.abs(tmp72) tmp74 = 
tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp76 = tl.sum(tmp74, 1)[:, None] tmp77 = tmp37 - tmp48 tmp78 = tl_math.abs(tmp77) tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK]) tmp81 = tl.sum(tmp79, 1)[:, None] tmp83 = tmp48 - tmp82 tmp84 = tl_math.abs(tmp83) tmp85 = tl.broadcast_to(tmp84, [XBLOCK, RBLOCK]) tmp87 = tl.sum(tmp85, 1)[:, None] tmp88 = tmp48 - tmp59 tmp89 = tl_math.abs(tmp88) tmp90 = tl.broadcast_to(tmp89, [XBLOCK, RBLOCK]) tmp92 = tl.sum(tmp90, 1)[:, None] tmp94 = tmp59 - tmp93 tmp95 = tl_math.abs(tmp94) tmp96 = tl.broadcast_to(tmp95, [XBLOCK, RBLOCK]) tmp98 = tl.sum(tmp96, 1)[:, None] tmp100 = tmp59 - tmp99 tmp101 = tl_math.abs(tmp100) tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK]) tmp104 = tl.sum(tmp102, 1)[:, None] tmp105 = tmp6 + tmp12 tmp106 = tmp105 + tmp18 tmp107 = tmp106 + tmp24 tmp108 = tmp107 + tmp30 tmp109 = tmp108 + tmp36 tmp110 = tmp109 + tmp42 tmp111 = tmp110 + tmp47 tmp112 = tmp111 + tmp53 tmp113 = tmp112 + tmp58 tmp114 = tmp113 + tmp64 tmp115 = tmp114 + tmp70 tmp116 = tmp115 + tmp76 tmp117 = tmp116 + tmp81 tmp118 = tmp117 + tmp87 tmp119 = tmp118 + tmp92 tmp120 = tmp119 + tmp98 tmp121 = tmp120 + tmp104 tmp122 = 0.0625 tmp123 = tmp121 * tmp122 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp123, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf16 = buf0 del buf0 buf19 = buf16 del buf16 get_raw_stream(0) triton_per_fused_abs_add_div_sub_sum_0[grid(1)](buf19, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf19, class SmoothnessLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: hologerry/DewarpNet
module_name: SmoothnessLoss
synthetic: false
uuid: 3,617
licenses: [ "MIT" ]
stars: 0
sha: b0a11b9fbb98bd124e65d3165ce177d9ebf2e836
repo_link: https://github.com/hologerry/DewarpNet/tree/b0a11b9fbb98bd124e65d3165ce177d9ebf2e836
entry_point: AELoss

original_triton_python_code:

import torch
import torch.utils.data
from torch import nn


class AELoss(nn.Module):

    def __init__(self, pull_factor, push_factor, distance, margin_push):
        super(AELoss, self).__init__()
        self.pull_factor = pull_factor
        self.push_factor = push_factor
        self.distance = distance
        self.margin_push = margin_push

    def forward(self, lof_tag_img, lof_tag_avg_img, lof_tag_avg_gather_img,
            mask, centerness_img=None):
        lof_tag_avg_gather_img = torch.round(lof_tag_avg_gather_img /
            self.distance) * self.distance
        tag = torch.pow(lof_tag_img - torch.round(lof_tag_avg_gather_img), 2)
        dist = lof_tag_avg_img.unsqueeze(0) - lof_tag_avg_img.unsqueeze(1)
        dist = self.distance + self.margin_push - torch.abs(dist)
        dist = nn.functional.relu(dist, inplace=True)
        dist = dist[mask]
        if centerness_img is not None:
            pull = (tag * centerness_img).sum() / centerness_img.sum()
            push = torch.zeros_like(pull)
            if mask.any():
                push = dist.sum() / mask.sum().float()
        else:
            pull = tag.mean()
            push = dist.mean()
        return self.pull_factor * pull, self.push_factor * push


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'pull_factor': 4, 'push_factor': 4, 'distance': 4,
        'margin_push': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_mul_pow_round_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp4 = libdevice.nearbyint(tmp3) tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = libdevice.nearbyint(tmp6) tmp8 = tmp0 - tmp7 tmp9 = tmp8 * tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = tmp14 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_abs_index_mean_mul_relu_rsub_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex // 256 r3 = rindex % 256 r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp7 = tl.load(in_ptr1 + (r0 + 64 * tmp4), None) tmp8 = tmp6 - tmp7 tmp9 = tl_math.abs(tmp8) tmp10 = 8.0 tmp11 = tmp10 - tmp9 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 1024.0 tmp18 = tmp16 / tmp17 tmp19 = 4.0 tmp20 = tmp18 * tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mean_mul_pow_round_sub_0[grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 triton_per_fused_abs_index_mean_mul_relu_rsub_sub_1[grid(1)](buf3, arg3_1, arg2_1, 1, 1024, num_warps=8, num_stages=1) del arg2_1 del arg3_1 return buf2, buf3 class AELossNew(nn.Module): def __init__(self, pull_factor, push_factor, distance, margin_push): super(AELossNew, self).__init__() self.pull_factor = pull_factor self.push_factor = push_factor self.distance = distance self.margin_push = margin_push def forward(self, input_0, input_1, input_2, input_3): 
arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
repo_name: houweidong/FCOS
module_name: AELoss
synthetic: false
uuid: 3,618
licenses: [ "BSD-2-Clause" ]
stars: 0
sha: ad7d5e5d1b162398af408a9635ce8a2012f7db8a
repo_link: https://github.com/houweidong/FCOS/tree/ad7d5e5d1b162398af408a9635ce8a2012f7db8a
entry_point: MCFullyConnected
import collections import torch import torch.utils.data from torch import nn def get_redistribution(kind: 'str', num_states: 'int', num_features: 'int'= None, num_out: 'int'=None, normaliser: 'nn.Module'=None, **kwargs): if kind == 'linear': return LinearRedistribution(num_states, num_features, num_out, normaliser) elif kind == 'outer': return OuterRedistribution(num_states, num_features, num_out, normaliser, **kwargs) elif kind == 'gate': return GateRedistribution(num_states, num_features, num_out, normaliser ) else: raise ValueError('unknown kind of redistribution: {}'.format(kind)) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class NormalisedSigmoid(nn.Module): """ Normalised logistic sigmoid function. 
""" def __init__(self, p: 'float'=1, dim: 'int'=-1): super().__init__() self.p = p self.dim = dim def forward(self, s: 'torch.Tensor') ->torch.Tensor: a = torch.sigmoid(s) return torch.nn.functional.normalize(a, p=self.p, dim=self.dim) class Redistribution(nn.Module): """ Base class for modules that generate redistribution vectors/matrices. """ def __init__(self, num_states: 'int', num_features: 'int'=None, num_out: 'int'=None, normaliser: 'nn.Module'=None): """ Parameters ---------- num_states : int The number of states this redistribution is to be applied on. num_features : int, optional The number of features to use for configuring the redistribution. If the redistribution is not input-dependent, this argument will be ignored. num_out : int, optional The number of outputs to redistribute the states to. If nothing is specified, the redistribution matrix is assumed to be square. normaliser : Module, optional Function to use for normalising the redistribution matrix. """ super().__init__() self.num_features = num_features self.num_states = num_states self.num_out = num_out or num_states self.normaliser = normaliser or NormalisedSigmoid(dim=-1) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: raise NotImplementedError('subclass must implement this method') def forward(self, x: 'torch.Tensor') ->torch.Tensor: r = self._compute(x) return self.normaliser(r) class Gate(Redistribution): """ Classic gate as used in e.g. LSTMs. Notes ----- The vector that is computed by this module gives rise to a diagonal redistribution matrix, i.e. a redistribution matrix that does not really redistribute (not normalised). """ def __init__(self, num_states, num_features, num_out=None, sigmoid=None): super().__init__(num_states, num_features, 1, sigmoid or nn.Sigmoid()) self.fc = nn.Linear(num_features, num_states) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.fc(x) class GateRedistribution(Redistribution): """ Gate-like redistribution that only depends on input. This module directly computes all entries for the redistribution matrix from a linear combination of the input values and is normalised by the activation function. """ def __init__(self, num_states, num_features, num_out=None, normaliser=None ): super().__init__(num_states, num_features, num_out, normaliser) self.fc = nn.Linear(num_features, self.num_states * self.num_out) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) if self.num_states == self.num_out: with torch.no_grad(): self.fc.bias[0::self.num_out + 1] = 3 def _compute(self, x: 'torch.Tensor') ->torch.Tensor: logits = self.fc(x) return logits.view(-1, self.num_states, self.num_out) class LinearRedistribution(Redistribution): """ Redistribution by normalising a learned matrix. This module has an unnormalised version of the redistribution matrix as parameters and is normalised by applying a non-linearity (the normaliser). The redistribution does not depend on any of the input values, but is updated using the gradients to fit the data. 
""" def __init__(self, num_states, num_features=0, num_out=None, normaliser =None): super(LinearRedistribution, self).__init__(num_states, 0, num_out, normaliser) self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=True) self.reset_parameters() @torch.no_grad() def reset_parameters(self): if self.num_states == self.num_out: nn.init.eye_(self.r) if type(self.normaliser) is NormalisedSigmoid: torch.mul(self.r, 2, out=self.r) torch.sub(self.r, 1, out=self.r) else: nn.init.orthogonal_(self.r) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.r.unsqueeze(0) class OuterRedistribution(Redistribution): """ Redistribution by (weighted) outer product of two input-dependent vectors. This module computes the entries for the redistribution matrix as the outer product of two vectors that are linear combinations of the input values. There is an option to include a weight matrix parameter to weight each entry in the resulting matrix, which is then normalised using a non-linearity. The weight matrix parameter is updated through the gradients to fit the data. """ def __init__(self, num_states, num_features, num_out=None, normaliser= None, weighted: 'bool'=False): """ Parameters ---------- weighted : bool, optional Whether or not to use a weighted outer product. """ super(OuterRedistribution, self).__init__(num_states, num_features, num_out, normaliser) self.weighted = weighted self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=weighted) self.fc1 = nn.Linear(num_features, self.num_states) self.fc2 = nn.Linear(num_features, self.num_out) self.phi = lambda x: x self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.r) nn.init.orthogonal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) nn.init.orthogonal_(self.fc2.weight) nn.init.zeros_(self.fc2.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: a1 = self.phi(self.fc1(x)) a2 = self.phi(self.fc2(x)) outer = a1.unsqueeze(-1) * a2.unsqueeze(-2) if self.weighted: outer *= self.r return outer class MCFullyConnected(ExtendedTorchModule): def __init__(self, in_features: 'int', out_features: 'int', **kwargs): super().__init__('MCFC', **kwargs) self.mass_input_size = in_features self.aux_input_size = 1 self.hidden_size = out_features self.normaliser = nn.Softmax(dim=-1) self.out_gate = Gate(self.hidden_size, self.aux_input_size) self.junction = get_redistribution('linear', num_states=self. mass_input_size, num_features=self.aux_input_size, num_out=self .hidden_size, normaliser=self.normaliser) @torch.no_grad() def reset_parameters(self): self.out_gate.reset_parameters() self.junction.reset_parameters() def log_gradients(self): for name, parameter in self.named_parameters(): gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) def regualizer(self, merge_in=None): r1 = -torch.mean(self.junction.r ** 2) r2 = -torch.mean(self.out_gate.fc.weight ** 2) r3 = -torch.mean(self.out_gate.fc.bias ** 2) return super().regualizer({'W': r1 + r2 + r3}) def forward(self, x): x_m, x_a = x, x.new_ones(1) j = self.junction(x_a) o = self.out_gate(x_a) m_in = torch.matmul(x_m.unsqueeze(-2), j).squeeze(-2) return o * m_in def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import collections import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_view_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = 1.0 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 1), (1, 1), torch.float32) triton_poi_fused_view_1[grid(1)](buf1, 1, XBLOCK=1, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf1, reinterpret_tensor(primals_3, (1, 4), 
(1, 1), 0), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf0, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf4 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (64, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (64, 4, 4), (0, 4, 1), 0), out =buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](buf2, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf5, primals_2, buf1, buf2, buf4, reinterpret_tensor(primals_1, (64, 4, 1), (4, 1, 4), 0) def get_redistribution(kind: 'str', num_states: 'int', num_features: 'int'= None, num_out: 'int'=None, normaliser: 'nn.Module'=None, **kwargs): if kind == 'linear': return LinearRedistribution(num_states, num_features, num_out, normaliser) elif kind == 'outer': return OuterRedistribution(num_states, num_features, num_out, normaliser, **kwargs) elif kind == 'gate': return GateRedistribution(num_states, num_features, num_out, normaliser ) else: raise ValueError('unknown kind of redistribution: {}'.format(kind)) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) 
self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class NormalisedSigmoid(nn.Module): """ Normalised logistic sigmoid function. """ def __init__(self, p: 'float'=1, dim: 'int'=-1): super().__init__() self.p = p self.dim = dim def forward(self, s: 'torch.Tensor') ->torch.Tensor: a = torch.sigmoid(s) return torch.nn.functional.normalize(a, p=self.p, dim=self.dim) class Redistribution(nn.Module): """ Base class for modules that generate redistribution vectors/matrices. """ def __init__(self, num_states: 'int', num_features: 'int'=None, num_out: 'int'=None, normaliser: 'nn.Module'=None): """ Parameters ---------- num_states : int The number of states this redistribution is to be applied on. num_features : int, optional The number of features to use for configuring the redistribution. If the redistribution is not input-dependent, this argument will be ignored. num_out : int, optional The number of outputs to redistribute the states to. If nothing is specified, the redistribution matrix is assumed to be square. normaliser : Module, optional Function to use for normalising the redistribution matrix. """ super().__init__() self.num_features = num_features self.num_states = num_states self.num_out = num_out or num_states self.normaliser = normaliser or NormalisedSigmoid(dim=-1) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: raise NotImplementedError('subclass must implement this method') def forward(self, x: 'torch.Tensor') ->torch.Tensor: r = self._compute(x) return self.normaliser(r) class Gate(Redistribution): """ Classic gate as used in e.g. LSTMs. Notes ----- The vector that is computed by this module gives rise to a diagonal redistribution matrix, i.e. a redistribution matrix that does not really redistribute (not normalised). """ def __init__(self, num_states, num_features, num_out=None, sigmoid=None): super().__init__(num_states, num_features, 1, sigmoid or nn.Sigmoid()) self.fc = nn.Linear(num_features, num_states) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.fc(x) class GateRedistribution(Redistribution): """ Gate-like redistribution that only depends on input. This module directly computes all entries for the redistribution matrix from a linear combination of the input values and is normalised by the activation function. 
""" def __init__(self, num_states, num_features, num_out=None, normaliser=None ): super().__init__(num_states, num_features, num_out, normaliser) self.fc = nn.Linear(num_features, self.num_states * self.num_out) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) if self.num_states == self.num_out: with torch.no_grad(): self.fc.bias[0::self.num_out + 1] = 3 def _compute(self, x: 'torch.Tensor') ->torch.Tensor: logits = self.fc(x) return logits.view(-1, self.num_states, self.num_out) class LinearRedistribution(Redistribution): """ Redistribution by normalising a learned matrix. This module has an unnormalised version of the redistribution matrix as parameters and is normalised by applying a non-linearity (the normaliser). The redistribution does not depend on any of the input values, but is updated using the gradients to fit the data. """ def __init__(self, num_states, num_features=0, num_out=None, normaliser =None): super(LinearRedistribution, self).__init__(num_states, 0, num_out, normaliser) self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=True) self.reset_parameters() @torch.no_grad() def reset_parameters(self): if self.num_states == self.num_out: nn.init.eye_(self.r) if type(self.normaliser) is NormalisedSigmoid: torch.mul(self.r, 2, out=self.r) torch.sub(self.r, 1, out=self.r) else: nn.init.orthogonal_(self.r) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.r.unsqueeze(0) class OuterRedistribution(Redistribution): """ Redistribution by (weighted) outer product of two input-dependent vectors. This module computes the entries for the redistribution matrix as the outer product of two vectors that are linear combinations of the input values. There is an option to include a weight matrix parameter to weight each entry in the resulting matrix, which is then normalised using a non-linearity. The weight matrix parameter is updated through the gradients to fit the data. """ def __init__(self, num_states, num_features, num_out=None, normaliser= None, weighted: 'bool'=False): """ Parameters ---------- weighted : bool, optional Whether or not to use a weighted outer product. """ super(OuterRedistribution, self).__init__(num_states, num_features, num_out, normaliser) self.weighted = weighted self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=weighted) self.fc1 = nn.Linear(num_features, self.num_states) self.fc2 = nn.Linear(num_features, self.num_out) self.phi = lambda x: x self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.r) nn.init.orthogonal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) nn.init.orthogonal_(self.fc2.weight) nn.init.zeros_(self.fc2.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: a1 = self.phi(self.fc1(x)) a2 = self.phi(self.fc2(x)) outer = a1.unsqueeze(-1) * a2.unsqueeze(-2) if self.weighted: outer *= self.r return outer class MCFullyConnectedNew(ExtendedTorchModule): def __init__(self, in_features: 'int', out_features: 'int', **kwargs): super().__init__('MCFC', **kwargs) self.mass_input_size = in_features self.aux_input_size = 1 self.hidden_size = out_features self.normaliser = nn.Softmax(dim=-1) self.out_gate = Gate(self.hidden_size, self.aux_input_size) self.junction = get_redistribution('linear', num_states=self. 
mass_input_size, num_features=self.aux_input_size, num_out=self .hidden_size, normaliser=self.normaliser) @torch.no_grad() def reset_parameters(self): self.out_gate.reset_parameters() self.junction.reset_parameters() def log_gradients(self): for name, parameter in self.named_parameters(): gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) def regualizer(self, merge_in=None): r1 = -torch.mean(self.junction.r ** 2) r2 = -torch.mean(self.out_gate.fc.weight ** 2) r3 = -torch.mean(self.out_gate.fc.bias ** 2) return super().regualizer({'W': r1 + r2 + r3}) def forward(self, input_0): primals_3 = self.out_gate.fc.weight primals_4 = self.out_gate.fc.bias primals_2 = self.junction.r primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: hoedt/stable-nalu
module_name: MCFullyConnected
synthetic: false
uuid: 3,619
licenses: [ "MIT" ]
stars: 0
sha: 64b3d240db8bff4da857d955f213ef3c7e38e035
repo_link: https://github.com/hoedt/stable-nalu/tree/64b3d240db8bff4da857d955f213ef3c7e38e035
entry_point: AdaptiveInstanceNorm

original_triton_python_code:

import torch
from torch import nn
from math import sqrt


def equal_lr(module, name='weight'):
    """Rescale weights after every updates."""
    EqualLR.apply(module, name)
    return module


class EqualLR:

    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        fan_in = weight.data.size(1) * weight.data[0][0].numel()
        return weight * sqrt(2 / fan_in)

    @staticmethod
    def apply(module, name):
        fn = EqualLR(name)
        weight = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(weight.data))
        module.register_forward_pre_hook(fn)
        return fn

    def __call__(self, module, input):
        weight = self.compute_weight(module)
        setattr(module, self.name, weight)


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        linear = nn.Linear(in_dim, out_dim)
        linear.weight.data.normal_()
        linear.bias.data.zero_()
        self.linear = equal_lr(linear)

    def forward(self, input):
        return self.linear(input)


class AdaptiveInstanceNorm(nn.Module):

    def __init__(self, in_channel, style_dim):
        super().__init__()
        self.norm = nn.InstanceNorm2d(in_channel)
        self.style = EqualLinear(style_dim, in_channel * 2)
        self.style.linear.bias.data[:in_channel] = 1
        self.style.linear.bias.data[in_channel:] = 0

    def forward(self, input, style):
        style = self.style(style).unsqueeze(2).unsqueeze(3)
        gamma, beta = style.chunk(2, 1)
        out = self.norm(input)
        out = gamma * out + beta
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = tmp0 - tmp10 tmp26 = tmp25 * tmp21 tmp27 = tmp24 * tmp26 tmp30 = tmp28 + tmp29 tmp31 = tmp27 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit_add_mul_1[grid(16)](buf5, primals_4, buf1, primals_2, buf2, buf6, 16, 
16, XBLOCK=8, num_warps=2, num_stages=1) del buf1 del primals_2 return buf6, buf0, primals_3, primals_4, buf2, buf5 def equal_lr(module, name='weight'): """Rescale weights after every updates. """ EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input): return self.linear(input) class AdaptiveInstanceNormNew(nn.Module): def __init__(self, in_channel, style_dim): super().__init__() self.norm = nn.InstanceNorm2d(in_channel) self.style = EqualLinear(style_dim, in_channel * 2) self.style.linear.bias.data[:in_channel] = 1 self.style.linear.bias.data[in_channel:] = 0 def forward(self, input_0, input_1): primals_2 = self.style.linear.bias primals_1 = self.style.linear.weight_orig primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
repo_name: hologerry/style-based-gan-pytorch
module_name: AdaptiveInstanceNorm
synthetic: false
uuid: 3,620
licenses: [ "MIT" ]
stars: 0
sha: 1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
repo_link: https://github.com/hologerry/style-based-gan-pytorch/tree/1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
entry_point: EqualLinear

original_triton_python_code:

import torch
from torch import nn
from math import sqrt


def equal_lr(module, name='weight'):
    """Rescale weights after every updates."""
    EqualLR.apply(module, name)
    return module


class EqualLR:

    def __init__(self, name):
        self.name = name

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        fan_in = weight.data.size(1) * weight.data[0][0].numel()
        return weight * sqrt(2 / fan_in)

    @staticmethod
    def apply(module, name):
        fn = EqualLR(name)
        weight = getattr(module, name)
        del module._parameters[name]
        module.register_parameter(name + '_orig', nn.Parameter(weight.data))
        module.register_forward_pre_hook(fn)
        return fn

    def __call__(self, module, input):
        weight = self.compute_weight(module)
        setattr(module, self.name, weight)


class EqualLinear(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        linear = nn.Linear(in_dim, out_dim)
        linear.weight.data.normal_()
        linear.bias.data.zero_()
        self.linear = equal_lr(linear)

    def forward(self, input):
        return self.linear(input)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.7071067811865476 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def equal_lr(module, name='weight'): """Rescale weights after every updates. """ EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() linear = nn.Linear(in_dim, out_dim) linear.weight.data.normal_() linear.bias.data.zero_() self.linear = equal_lr(linear) def forward(self, input_0): primals_2 = self.linear.bias primals_1 = self.linear.weight_orig primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: hologerry/style-based-gan-pytorch
module_name: EqualLinear
synthetic: false
uuid: 3,621
licenses: [ "MIT" ]
stars: 0
sha: 1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
repo_link: https://github.com/hologerry/style-based-gan-pytorch/tree/1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
entry_point: CFRB
import torch from collections import OrderedDict import torch.nn as nn import torch.nn.functional as F from torch import autograd as autograd import torch.fft from itertools import product as product def sequential(*args): """Advanced nn.Sequential. Args: nn.Sequential, nn.Module Returns: nn.Sequential """ if len(args) == 1: if isinstance(args[0], OrderedDict): raise NotImplementedError( 'sequential does not support OrderedDict input.') return args[0] modules = [] for module in args: if isinstance(module, nn.Sequential): for submodule in module.children(): modules.append(submodule) elif isinstance(module, nn.Module): modules.append(module) return nn.Sequential(*modules) def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding= 1, bias=True, mode='CBR', negative_slope=0.2): L = [] for t in mode: if t == 'C': L.append(nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)) elif t == 'T': L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=padding, bias=bias)) elif t == 'B': L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001, affine=True)) elif t == 'I': L.append(nn.InstanceNorm2d(out_channels, affine=True)) elif t == 'R': L.append(nn.ReLU(inplace=True)) elif t == 'r': L.append(nn.ReLU(inplace=False)) elif t == 'L': L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True)) elif t == 'l': L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False) ) elif t == '2': L.append(nn.PixelShuffle(upscale_factor=2)) elif t == '3': L.append(nn.PixelShuffle(upscale_factor=3)) elif t == '4': L.append(nn.PixelShuffle(upscale_factor=4)) elif t == 'U': L.append(nn.Upsample(scale_factor=2, mode='nearest')) elif t == 'u': L.append(nn.Upsample(scale_factor=3, mode='nearest')) elif t == 'v': L.append(nn.Upsample(scale_factor=4, mode='nearest')) elif t == 'M': L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0)) elif t == 'A': L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0)) else: raise NotImplementedError('Undefined type: ') return sequential(*L) class ESA(nn.Module): def __init__(self, channel=64, reduction=4, bias=True): super(ESA, self).__init__() self.r_nc = channel // reduction self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1) self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1) self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride= 2, padding=0) self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU(inplace=True) def forward(self, x): x1 = self.conv1(x) x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3) x2 = self.relu(self.conv3(x2)) x2 = self.relu(self.conv4(x2)) x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode= 'bilinear', align_corners=False) x2 = self.conv6(x2 + self.conv21(x1)) return x.mul(self.sigmoid(x2)) class CFRB(nn.Module): def __init__(self, in_channels=50, out_channels=50, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.5, negative_slope=0.05): super(CFRB, self).__init__() self.d_nc = int(in_channels * d_rate) self.r_nc = in_channels assert mode[0] == 'C', 'convolutional layer first' self.conv1_d = conv(in_channels, 
self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.act = conv(mode=mode[-1], negative_slope=negative_slope) self.esa = ESA(in_channels, reduction=4, bias=True) def forward(self, x): d1 = self.conv1_d(x) x = self.act(self.conv1_r(x) + x) d2 = self.conv2_d(x) x = self.act(self.conv2_r(x) + x) d3 = self.conv3_d(x) x = self.act(self.conv3_r(x) + x) x = self.conv4_d(x) x = self.act(torch.cat([d1, d2, d3, x], dim=1)) x = self.esa(self.conv1x1(x)) return x def get_inputs(): return [torch.rand([4, 50, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from collections import OrderedDict import torch.nn as nn import torch.nn.functional as F from torch import autograd as autograd import torch.fft from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 50 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.05 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(in_out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_cat_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 100 x0 = xindex % 4096 x2 = xindex // 409600 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 25, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 102400 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 50, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-25 + x1) + 102400 * x2), tmp13, other=0.0) tmp15 = tl.load(in_ptr3 + (-25 + x1), tmp13, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 75, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-50 + x1) + 102400 * x2), tmp22, other=0.0) tmp24 = tl.load(in_ptr5 + (-50 + x1), tmp22, eviction_policy= 'evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tl.full([1], 100, tl.int64) tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-75 + x1) + 102400 * x2), tmp28, other=0.0) tmp32 = tl.load(in_ptr7 + (-75 + x1), tmp28, eviction_policy= 'evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp28, tmp33, tmp34) tmp36 = tl.where(tmp22, tmp27, tmp35) tmp37 = tl.where(tmp13, tmp18, tmp36) tmp38 = tl.where(tmp4, tmp9, tmp37) tmp39 = 0.0 tmp40 = tmp38 > tmp39 tmp41 = 0.05 tmp42 = tmp38 * tmp41 tmp43 = tl.where(tmp40, tmp38, tmp42) tl.store(in_out_ptr0 + x3, tmp43, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 50 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 12 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 961 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3888 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 12 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused__to_copy_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.140625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.140625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 8, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.140625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x5 = xindex // 4096 x2 = xindex // 4096 % 12 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') 
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr8 + x6, None) tmp38 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 9, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 9 * tmp4 + 81 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp12 < 0 tmp15 = tl.where(tmp14, tmp13, tmp12) tmp16 = tl.load(in_ptr2 + (tmp15 + 9 * tmp4 + 81 * x5), None, eviction_policy='evict_last') tmp17 = tmp16 + tmp10 tmp18 = tmp17 - tmp11 tmp20 = tmp18 * tmp19 tmp21 = tmp11 + tmp20 tmp23 = tmp22 + tmp1 tmp24 = tmp22 < 0 tmp25 = tl.where(tmp24, tmp23, tmp22) tmp26 = tl.load(in_ptr2 + (tmp8 + 9 * tmp25 + 81 * x5), None, eviction_policy='evict_last') tmp27 = tmp26 + tmp10 tmp28 = tl.load(in_ptr2 + (tmp15 + 9 * tmp25 + 81 * x5), None, eviction_policy='evict_last') tmp29 = tmp28 + tmp10 tmp30 = tmp29 - tmp27 tmp31 = tmp30 * tmp19 tmp32 = tmp27 + tmp31 tmp33 = tmp32 - tmp21 tmp35 = tmp33 * tmp34 tmp36 = tmp21 + tmp35 tmp39 = tmp37 + tmp38 tmp40 = tmp36 + tmp39 tl.store(in_out_ptr0 + x6, tmp40, None) @triton.jit def triton_poi_fused_convolution_mul_sigmoid_10(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 50 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp5, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31) = args args.clear() assert_size_stride(primals_1, (25, 50, 1, 1), (50, 1, 1, 1)) assert_size_stride(primals_2, (25,), (1,)) assert_size_stride(primals_3, (4, 50, 64, 64), (204800, 4096, 64, 1)) assert_size_stride(primals_4, (50, 50, 3, 3), (450, 9, 3, 1)) assert_size_stride(primals_5, (50,), (1,)) assert_size_stride(primals_6, (25, 50, 1, 1), (50, 1, 1, 1)) assert_size_stride(primals_7, (25,), (1,)) assert_size_stride(primals_8, (50, 50, 3, 3), (450, 9, 3, 1)) assert_size_stride(primals_9, (50,), (1,)) assert_size_stride(primals_10, (25, 50, 1, 1), (50, 1, 1, 1)) assert_size_stride(primals_11, (25,), (1,)) assert_size_stride(primals_12, (50, 50, 3, 3), (450, 9, 3, 1)) assert_size_stride(primals_13, (50,), (1,)) assert_size_stride(primals_14, (25, 50, 3, 3), (450, 9, 3, 1)) assert_size_stride(primals_15, (25,), (1,)) assert_size_stride(primals_16, (50, 100, 1, 1), (100, 1, 1, 1)) assert_size_stride(primals_17, (50,), (1,)) assert_size_stride(primals_18, (12, 50, 1, 1), (50, 1, 1, 1)) assert_size_stride(primals_19, (12,), (1,)) 
assert_size_stride(primals_20, (12, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_21, (12,), (1,)) assert_size_stride(primals_22, (12, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_23, (12,), (1,)) assert_size_stride(primals_24, (12, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_25, (12,), (1,)) assert_size_stride(primals_26, (12, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_27, (12,), (1,)) assert_size_stride(primals_28, (12, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_29, (12,), (1,)) assert_size_stride(primals_30, (50, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_31, (50,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 25, 64, 64), (102400, 4096, 64, 1)) buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 50, 64, 64), (204800, 4096, 64, 1)) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf2, primals_5, primals_3, 819200, XBLOCK=512, num_warps=8, num_stages=1 ) del primals_5 buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 25, 64, 64), (102400, 4096, 64, 1)) buf4 = extern_kernels.convolution(buf2, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 50, 64, 64), (204800, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf5, primals_9, buf2, 819200, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf6 = extern_kernels.convolution(buf5, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 25, 64, 64), (102400, 4096, 64, 1)) buf7 = extern_kernels.convolution(buf5, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 50, 64, 64), (204800, 4096, 64, 1)) buf8 = buf7 del buf7 triton_poi_fused_add_convolution_leaky_relu_0[grid(819200)](buf8, primals_13, buf5, 819200, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf9 = extern_kernels.convolution(buf8, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 25, 64, 64), (102400, 4096, 64, 1)) buf10 = empty_strided_cuda((4, 100, 64, 64), (409600, 4096, 64, 1), torch.float32) buf11 = buf10 del buf10 triton_poi_fused_cat_leaky_relu_1[grid(1638400)](buf11, buf0, primals_2, buf3, primals_7, buf6, primals_11, buf9, primals_15, 1638400, XBLOCK=512, num_warps=8, num_stages=1) del buf0 del buf3 del buf6 del buf9 del primals_11 del primals_15 del primals_2 del primals_7 buf12 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 50, 64, 64), (204800, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_2[grid(819200)](buf13, primals_17, 819200, XBLOCK=1024, num_warps=4, num_stages=1) 
del primals_17 buf14 = extern_kernels.convolution(buf13, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_3[grid(196608)](buf15, primals_19, 196608, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf16 = extern_kernels.convolution(buf15, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 12, 31, 31), (11532, 961, 31, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_4[grid(46128)](buf17, primals_21, 46128, XBLOCK=512, num_warps=4, num_stages=1) del primals_21 buf18 = torch.ops.aten.max_pool2d_with_indices.default(buf17, [7, 7 ], [3, 3]) buf19 = buf18[0] buf20 = buf18[1] del buf18 buf21 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 12, 9, 9), (972, 81, 9, 1)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_5[grid(3888)](buf22, primals_23, 3888, XBLOCK=256, num_warps=4, num_stages=1) del primals_23 buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 12, 9, 9), (972, 81, 9, 1)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_5[grid(3888)](buf24, primals_25, 3888, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 12, 9, 9), (972, 81, 9, 1)) buf26 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_6[grid(64)](buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_7[grid(64)](buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_6[grid(64)](buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_7[grid(64)](buf29, 64, XBLOCK=64, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) buf32 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_8[grid(64)](buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) buf34 = extern_kernels.convolution(buf15, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf33 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32) buf35 = buf33 del buf33 triton_poi_fused__unsafe_index_add_convolution_mul_sub_9[grid(196608)]( buf35, buf26, buf28, buf25, primals_27, buf29, buf30, buf27, buf32, buf34, primals_29, 196608, XBLOCK=512, num_warps=8, num_stages=1) del buf25 del buf34 del primals_27 del primals_29 buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 
50, 64, 64), (204800, 4096, 64, 1)) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4, 50, 64, 64), (204800, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_mul_sigmoid_10[grid(819200)](buf37, primals_31, buf13, buf38, 819200, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 return (buf38, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, buf2, buf5, buf8, buf11, buf13, buf15, buf17, buf19, buf20, buf22, buf24, buf26, buf27, buf28, buf29, buf30, buf32, buf35, buf37) def sequential(*args): """Advanced nn.Sequential. Args: nn.Sequential, nn.Module Returns: nn.Sequential """ if len(args) == 1: if isinstance(args[0], OrderedDict): raise NotImplementedError( 'sequential does not support OrderedDict input.') return args[0] modules = [] for module in args: if isinstance(module, nn.Sequential): for submodule in module.children(): modules.append(submodule) elif isinstance(module, nn.Module): modules.append(module) return nn.Sequential(*modules) def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding= 1, bias=True, mode='CBR', negative_slope=0.2): L = [] for t in mode: if t == 'C': L.append(nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)) elif t == 'T': L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=padding, bias=bias)) elif t == 'B': L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=0.0001, affine=True)) elif t == 'I': L.append(nn.InstanceNorm2d(out_channels, affine=True)) elif t == 'R': L.append(nn.ReLU(inplace=True)) elif t == 'r': L.append(nn.ReLU(inplace=False)) elif t == 'L': L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True)) elif t == 'l': L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False) ) elif t == '2': L.append(nn.PixelShuffle(upscale_factor=2)) elif t == '3': L.append(nn.PixelShuffle(upscale_factor=3)) elif t == '4': L.append(nn.PixelShuffle(upscale_factor=4)) elif t == 'U': L.append(nn.Upsample(scale_factor=2, mode='nearest')) elif t == 'u': L.append(nn.Upsample(scale_factor=3, mode='nearest')) elif t == 'v': L.append(nn.Upsample(scale_factor=4, mode='nearest')) elif t == 'M': L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0)) elif t == 'A': L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0)) else: raise NotImplementedError('Undefined type: ') return sequential(*L) class ESA(nn.Module): def __init__(self, channel=64, reduction=4, bias=True): super(ESA, self).__init__() self.r_nc = channel // reduction self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1) self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1) self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride= 2, padding=0) self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1) self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1) self.sigmoid = nn.Sigmoid() self.relu = nn.ReLU(inplace=True) def forward(self, x): x1 = self.conv1(x) x2 = F.max_pool2d(self.conv2(x1), kernel_size=7, stride=3) x2 = self.relu(self.conv3(x2)) x2 = self.relu(self.conv4(x2)) x2 = F.interpolate(self.conv5(x2), (x.size(2), x.size(3)), mode= 'bilinear', 
align_corners=False) x2 = self.conv6(x2 + self.conv21(x1)) return x.mul(self.sigmoid(x2)) class CFRBNew(nn.Module): def __init__(self, in_channels=50, out_channels=50, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.5, negative_slope=0.05): super(CFRBNew, self).__init__() self.d_nc = int(in_channels * d_rate) self.r_nc = in_channels assert mode[0] == 'C', 'convolutional layer first' self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias=bias, mode=mode[0]) self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0]) self.act = conv(mode=mode[-1], negative_slope=negative_slope) self.esa = ESA(in_channels, reduction=4, bias=True) def forward(self, input_0): primals_1 = self.conv1_d.weight primals_2 = self.conv1_d.bias primals_4 = self.conv1_r.weight primals_5 = self.conv1_r.bias primals_6 = self.conv2_d.weight primals_7 = self.conv2_d.bias primals_8 = self.conv2_r.weight primals_9 = self.conv2_r.bias primals_10 = self.conv3_d.weight primals_11 = self.conv3_d.bias primals_12 = self.conv3_r.weight primals_13 = self.conv3_r.bias primals_14 = self.conv4_d.weight primals_15 = self.conv4_d.bias primals_16 = self.conv1x1.weight primals_17 = self.conv1x1.bias primals_18 = self.esa.conv1.weight primals_19 = self.esa.conv1.bias primals_28 = self.esa.conv21.weight primals_21 = self.esa.conv21.bias primals_20 = self.esa.conv2.weight primals_23 = self.esa.conv2.bias primals_22 = self.esa.conv3.weight primals_25 = self.esa.conv3.bias primals_24 = self.esa.conv4.weight primals_27 = self.esa.conv4.bias primals_26 = self.esa.conv5.weight primals_29 = self.esa.conv5.bias primals_30 = self.esa.conv6.weight primals_31 = self.esa.conv6.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return output[0]
hduba/KAIR
CFRB
false
3622
[ "MIT" ]
0
dbd7596c7e4a4667b9b7baac369fc6c02571fa58
https://github.com/hduba/KAIR/tree/dbd7596c7e4a4667b9b7baac369fc6c02571fa58
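For reference, a minimal plain-PyTorch sketch of the ESA attention branch that CFRB applies at its output, with freshly initialized layers (shapes follow get_inputs(): a 4x50x64x64 feature map); this only illustrates the data flow the compiled kernels above reproduce, not trained behaviour.

import torch
import torch.nn as nn
import torch.nn.functional as F

channel, r_nc = 50, 50 // 4                      # reduction=4 -> r_nc = 12
x = torch.rand(4, channel, 64, 64)               # shape from get_inputs()

conv1 = nn.Conv2d(channel, r_nc, 1)
conv21 = nn.Conv2d(r_nc, r_nc, 1)
conv2 = nn.Conv2d(r_nc, r_nc, 3, stride=2, padding=0)
conv3 = nn.Conv2d(r_nc, r_nc, 3, padding=1)
conv4 = nn.Conv2d(r_nc, r_nc, 3, padding=1)
conv5 = nn.Conv2d(r_nc, r_nc, 3, padding=1)
conv6 = nn.Conv2d(r_nc, channel, 1)

x1 = conv1(x)                                                    # (4, 12, 64, 64)
x2 = F.max_pool2d(conv2(x1), kernel_size=7, stride=3)            # (4, 12, 31, 31) -> (4, 12, 9, 9)
x2 = F.relu(conv4(F.relu(conv3(x2))))                            # two 3x3 convs at low resolution
x2 = F.interpolate(conv5(x2), (x.size(2), x.size(3)), mode='bilinear', align_corners=False)
mask = torch.sigmoid(conv6(x2 + conv21(x1)))                     # (4, 50, 64, 64) attention map
out = x * mask
print(out.shape)                                                 # torch.Size([4, 50, 64, 64])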
EqualConv2d
import torch from torch import nn from math import sqrt def equal_lr(module, name='weight'): """Rescale weights after every updates. """ EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualConv2d(nn.Module): def __init__(self, *args, **kwargs): super().__init__() conv = nn.Conv2d(*args, **kwargs) conv.weight.data.normal_() conv.bias.data.zero_() self.conv = equal_lr(conv) def forward(self, input): return self.conv(input) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from math import sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.1767766952966369 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf2, buf0, primals_3, buf0 def equal_lr(module, name='weight'): """Rescale weights after every updates. """ EqualLR.apply(module, name) return module class EqualLR: def __init__(self, name): self.name = name def compute_weight(self, module): weight = getattr(module, self.name + '_orig') fan_in = weight.data.size(1) * weight.data[0][0].numel() return weight * sqrt(2 / fan_in) @staticmethod def apply(module, name): fn = EqualLR(name) weight = getattr(module, name) del module._parameters[name] module.register_parameter(name + '_orig', nn.Parameter(weight.data)) module.register_forward_pre_hook(fn) return fn def __call__(self, module, input): weight = self.compute_weight(module) setattr(module, self.name, weight) class EqualConv2dNew(nn.Module): def __init__(self, *args, **kwargs): super().__init__() conv = nn.Conv2d(*args, **kwargs) conv.weight.data.normal_() conv.bias.data.zero_() self.conv = equal_lr(conv) def forward(self, input_0): primals_2 = self.conv.bias primals_1 = self.conv.weight_orig primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hologerry/style-based-gan-pytorch
EqualConv2d
false
3623
[ "MIT" ]
0
1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
https://github.com/hologerry/style-based-gan-pytorch/tree/1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
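A brief sketch, using only plain PyTorch, of the equalized-learning-rate convolution that EqualConv2d/EqualLR implements; note that fan_in = 4*4*4 = 64 gives sqrt(2/64) ≈ 0.1767766952966369, the constant baked into triton_poi_fused_mul_0 above. Weights here are random placeholders.

import torch
import torch.nn.functional as F
from math import sqrt

weight_orig = torch.randn(4, 4, 4, 4)            # normal-initialized weight stored by EqualLR
bias = torch.zeros(4)
x = torch.rand(4, 4, 4, 4)                       # shape from get_inputs()

fan_in = weight_orig.size(1) * weight_orig[0][0].numel()   # 4 * 16 = 64
scale = sqrt(2 / fan_in)                         # ~0.1767766952966369, as in triton_poi_fused_mul_0
out = F.conv2d(x, weight_orig * scale, bias)     # 4x4 kernel over a 4x4 input -> 1x1 spatial output
print(scale, out.shape)                          # 0.1767766952966369 torch.Size([4, 4, 1, 1])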
NoiseInjection
import torch from torch import nn class NoiseInjection(nn.Module): def __init__(self, channel): super().__init__() self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1)) def forward(self, image, noise): return image + self.weight * noise def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x3, xmask) tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_3, primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class NoiseInjectionNew(nn.Module): def __init__(self, channel): super().__init__() self.weight = nn.Parameter(torch.zeros(1, channel, 1, 1)) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
hologerry/style-based-gan-pytorch
NoiseInjection
false
3624
[ "MIT" ]
0
1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
https://github.com/hologerry/style-based-gan-pytorch/tree/1a694fb3ea0288f1aaaa43aa67a570d908d9dc27
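The fused kernel above is a single elementwise add+mul; a minimal sketch of the same computation in plain PyTorch, with the zero-initialized weight from __init__, so the output equals the input image.

import torch

channel = 4
weight = torch.zeros(1, channel, 1, 1)           # per-channel scale, zero-initialized
image = torch.rand(4, channel, 4, 4)
noise = torch.rand(4, channel, 4, 4)

out = image + weight * noise                     # the single fused add+mul computed by the kernel
assert torch.equal(out, image)                   # with a zero weight, no noise is injected yet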
DocUnetLoss_DL_batch
import torch import torch.nn as nn import torch.nn.functional as F class DocUnetLoss_DL_batch(nn.Module): """ Loss that uses only a single UNet; training with this loss currently works best. """ def __init__(self, r=0.0, reduction='mean'): super(DocUnetLoss_DL_batch, self).__init__() assert reduction in ['mean', 'sum' ], " reduction must in ['mean','sum']" self.r = r self.reduction = reduction def forward(self, y, label): _bs, _n, _h, _w = y.size() d = y - label loss1 = [] for d_i in d: loss1.append(torch.abs(d_i).mean() - self.r * torch.abs(d_i.mean()) ) loss1 = torch.stack(loss1) loss2 = F.mse_loss(y, label, reduction=self.reduction) if self.reduction == 'mean': loss1 = loss1.mean() elif self.reduction == 'sum': loss1 = loss1.sum() return loss1 + loss2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_abs_mean_stack_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp6 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tl_math.abs(tmp12) tmp14 = 0.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 - tmp15 tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) @triton.jit def triton_per_fused_abs_mean_stack_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 + r0), None) tmp1 = tl.load(in_ptr1 + (64 + r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp6 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tl_math.abs(tmp12) tmp14 = 0.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 - tmp15 tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) @triton.jit def triton_per_fused_abs_mean_stack_2(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (128 + r0), None) tmp1 = tl.load(in_ptr1 + (128 + r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp6 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tl_math.abs(tmp12) tmp14 = 0.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 - tmp15 tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) @triton.jit def triton_per_fused_abs_mean_stack_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, 
RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 64.0 tmp11 = tmp6 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tl_math.abs(tmp12) tmp14 = 0.0 tmp15 = tmp13 * tmp14 tmp16 = tmp11 - tmp15 tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) @triton.jit def triton_per_fused_mean_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_add_mean_mse_loss_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_out_ptr0 + 0) tmp8 = tl.broadcast_to(tmp7, [1]) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = 256.0 tmp12 = tmp6 / tmp11 tmp13 = tmp10 + tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf12 = empty_strided_cuda((4,), (1,), torch.float32) buf8 = reinterpret_tensor(buf12, (1,), (1,), 0) get_raw_stream(0) triton_per_fused_abs_mean_stack_0[grid(1)](arg0_1, arg1_1, buf8, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf9 = reinterpret_tensor(buf12, (1,), (1,), 1) triton_per_fused_abs_mean_stack_1[grid(1)](arg0_1, arg1_1, buf9, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf10 = reinterpret_tensor(buf12, (1,), (1,), 2) triton_per_fused_abs_mean_stack_2[grid(1)](arg0_1, arg1_1, buf10, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf11 = reinterpret_tensor(buf12, (1,), (1,), 3) triton_per_fused_abs_mean_stack_3[grid(1)](arg0_1, arg1_1, buf11, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf13 = empty_strided_cuda((), (), torch.float32) triton_per_fused_mean_4[grid(1)](buf12, buf13, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf11 del buf12 del buf8 del buf9 buf15 = buf13 del buf13 triton_per_fused_add_mean_mse_loss_5[grid(1)](buf15, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf15, class DocUnetLoss_DL_batchNew(nn.Module): """ 只使用一个unet的loss 目前使用这个loss训练的比较好 """ def __init__(self, r=0.0, reduction='mean'): super(DocUnetLoss_DL_batchNew, self).__init__() assert reduction in ['mean', 'sum' ], " reduction must in ['mean','sum']" self.r = r self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
hologerry/DewarpNet
DocUnetLoss_DL_batch
false
3625
[ "MIT" ]
0
b0a11b9fbb98bd124e65d3165ce177d9ebf2e836
https://github.com/hologerry/DewarpNet/tree/b0a11b9fbb98bd124e65d3165ce177d9ebf2e836
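A short plain-PyTorch restatement of the loss for the default r=0.0, reduction='mean' case handled by the kernels above; r=0.0 is why each per-sample kernel scales abs(mean) by the constant 0.0.

import torch
import torch.nn.functional as F

r = 0.0                                          # default value from __init__
y = torch.rand(4, 4, 4, 4)
label = torch.rand(4, 4, 4, 4)

d = y - label
loss1 = torch.stack([d_i.abs().mean() - r * d_i.mean().abs() for d_i in d]).mean()
loss2 = F.mse_loss(y, label, reduction='mean')
print(loss1 + loss2)                             # scalar, matching the single buffer returned by call()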
LgRegv
import torch import torch.nn as nn class LgRegv(torch.nn.Module): """ TODO: pre-training from power to voronoi """ def __init__(self, dim, nla): super(LgRegv, self).__init__() self.linear = nn.Linear(dim, nla, bias=False) def forward(self, x): ba = -torch.sum((self.linear.weight / 2) ** 2, dim=1) y_hat = self.linear(x) + ba y_hat = torch.sigmoid(y_hat) return y_hat def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'nla': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_neg_pow_sigmoid_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp3 tmp6 = tmp5 * tmp2 tmp7 = tmp6 * tmp6 tmp8 = tmp4 + tmp7 tmp10 = tmp9 * tmp2 tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp14 = tmp13 * tmp2 tmp15 = tmp14 * tmp14 tmp16 = tmp12 + tmp15 tmp17 = -tmp16 tmp18 = tmp0 + tmp17 tmp19 = tl.sigmoid(tmp18) tl.store(in_out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_div_neg_pow_sigmoid_sum_0[grid(256)](buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf1 class LgRegvNew(torch.nn.Module): """ TODO: pre-training from power to voronoi """ def __init__(self, dim, nla): super(LgRegvNew, self).__init__() self.linear = nn.Linear(dim, nla, bias=False) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
horsepurve/DeepVoro
LgRegv
false
3626
[ "MIT" ]
0
1b67a8e0d51e1c966a2af96d4b6a495f8390f608
https://github.com/horsepurve/DeepVoro/tree/1b67a8e0d51e1c966a2af96d4b6a495f8390f608
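A minimal sketch of the LgRegv forward pass in plain tensor ops (random weights, illustration only): the bias-free linear layer acts on the last dimension and the offset ba is derived from the weight itself, which is what the fused sigmoid kernel above recomputes per row.

import torch

dim, nla = 4, 4
W = torch.randn(nla, dim)                        # linear.weight, shape (nla, dim)
x = torch.rand(4, 4, 4, 4)

ba = -((W / 2) ** 2).sum(dim=1)                  # per-class offset, shape (nla,)
y_hat = torch.sigmoid(x @ W.t() + ba)            # linear over the last dim, then broadcasted offset
print(y_hat.shape)                               # torch.Size([4, 4, 4, 4])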
distLinear
import torch import torch.nn as nn from torch.nn.utils.weight_norm import WeightNorm class distLinear(nn.Module): def __init__(self, indim, outdim): super(distLinear, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = True if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, x): x_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x) x_normalized = x.div(x_norm + 1e-05) if not self.class_wise_learnable_norm: L_norm = torch.norm(self.L.weight.data, p=2, dim=1).unsqueeze(1 ).expand_as(self.L.weight.data) self.L.weight.data = self.L.weight.data.div(L_norm + 1e-05) cos_dist = self.L(x_normalized) scores = self.scale_factor * cos_dist return scores def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'indim': 4, 'outdim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils.weight_norm import WeightNorm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1), (1, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3, primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_2[grid(256)](primals_1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_mul_3[grid(256)](buf4, 256, XBLOCK=256, num_warps= 4, num_stages=1) return buf4, buf1, primals_2, primals_3, buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0) class distLinearNew(nn.Module): def __init__(self, indim, outdim): super(distLinearNew, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = True if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, input_0): primals_2 = self.L.weight_g primals_3 = self.L.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
horsepurve/DeepVoro
distLinear
false
3627
[ "MIT" ]
0
1b67a8e0d51e1c966a2af96d4b6a495f8390f608
https://github.com/horsepurve/DeepVoro/tree/1b67a8e0d51e1c966a2af96d4b6a495f8390f608
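A sketch of the cosine-similarity classifier distLinear computes, written for a 2-D batch of feature vectors (an assumed simplification of the 4-D test input above); weight_g and weight_v mirror what WeightNorm(dim=0) stores, and outdim <= 200 selects scale_factor = 2.

import torch

indim, outdim, scale_factor = 4, 4, 2
x = torch.rand(64, indim)                        # a batch of feature vectors
weight_v = torch.randn(outdim, indim)            # plays the role of L.weight_v
weight_g = torch.rand(outdim, 1)                 # plays the role of L.weight_g

x_normalized = x / (x.norm(p=2, dim=1, keepdim=True) + 1e-05)
weight = weight_g * weight_v / weight_v.norm(p=2, dim=1, keepdim=True)   # WeightNorm(dim=0) reconstruction
scores = scale_factor * (x_normalized @ weight.t())
print(scores.shape)                              # torch.Size([64, 4])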
Conv2d_fw
import torch import torch.nn as nn import torch.nn.functional as F class Conv2d_fw(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): super(Conv2d_fw, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) self.weight.fast = None if self.bias is not None: self.bias.fast = None def forward(self, x): if self.bias is None: if self.weight.fast is not None: out = F.conv2d(x, self.weight.fast, None, stride=self. stride, padding=self.padding) else: out = super(Conv2d_fw, self).forward(x) elif self.weight.fast is not None and self.bias.fast is not None: out = F.conv2d(x, self.weight.fast, self.bias.fast, stride=self .stride, padding=self.padding) else: out = super(Conv2d_fw, self).forward(x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return buf1, primals_2, primals_3 class Conv2d_fwNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): super(Conv2d_fwNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) self.weight.fast = None if self.bias is not None: self.bias.fast = None def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
horsepurve/DeepVoro
Conv2d_fw
false
3628
[ "MIT" ]
0
1b67a8e0d51e1c966a2af96d4b6a495f8390f608
https://github.com/horsepurve/DeepVoro/tree/1b67a8e0d51e1c966a2af96d4b6a495f8390f608
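A hedged sketch of the "fast weight" branch of Conv2d_fw, as used in MAML-style inner loops; the inner-loop update and the 0.01 learning rate below are hypothetical, purely to populate fast weights for illustration.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
weight = torch.randn(4, 4, 4, 4, requires_grad=True)   # the module's regular ("slow") weight
bias = torch.zeros(4, requires_grad=True)

# Hypothetical inner-loop step producing fast weights
loss = F.conv2d(x, weight, bias).sum()
grad_w, grad_b = torch.autograd.grad(loss, (weight, bias))
fast_weight = weight - 0.01 * grad_w
fast_bias = bias - 0.01 * grad_b

# The branch Conv2d_fw.forward takes when weight.fast and bias.fast are populated
out = F.conv2d(x, fast_weight, fast_bias, stride=1, padding=0)
print(out.shape)                                 # torch.Size([4, 4, 1, 1])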
EdgeGCN
from torch.nn import Module import torch from torch.nn.modules.module import Module import torch.nn as nn class EdgeGCN(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, include_adj=True, bias=True): super(EdgeGCN, self).__init__() self.include_adj = include_adj self.in_features = in_features + 1 if self.include_adj else in_features self.out_features = out_features self.fc = nn.Linear(self.in_features, self.out_features, bias=bias) def forward(self, feat, adj): feat_diff = (feat.unsqueeze(0).repeat(feat.shape[0], 1, 1) - feat. unsqueeze(1).repeat(1, feat.shape[0], 1)).abs() if self.include_adj: x = torch.cat((feat_diff, adj.unsqueeze(2)), 2).view(feat.shape [0] * feat.shape[0], -1) else: x = feat_diff.view(feat.shape[0] * feat.shape[0], -1) output = self.fc(x) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules.module import Module import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 % 4 x2 = xindex // 20 x3 = xindex // 5 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp7 = tmp5 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp14 = tl.load(in_ptr1 + x3, tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp4, tmp10, tmp14) tl.store(out_ptr0 + x4, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 5), (5, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_1, primals_2, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 5), ( 5, 1), 0), reinterpret_tensor(primals_3, (5, 4), (1, 5), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return buf1, reinterpret_tensor(buf0, (16, 5), (5, 1), 0) class EdgeGCNNew(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, include_adj=True, bias=True): super(EdgeGCNNew, self).__init__() self.include_adj = include_adj self.in_features = in_features + 1 if self.include_adj else in_features self.out_features = out_features self.fc = nn.Linear(self.in_features, self.out_features, bias=bias) def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' def forward(self, input_0, input_1): primals_3 = self.fc.weight primals_4 = self.fc.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
hou-yz/pygcn
EdgeGCN
false
3629
[ "MIT" ]
0
26195954035c5eaae2d6e086cfec24cad2642f2e
https://github.com/hou-yz/pygcn/tree/26195954035c5eaae2d6e086cfec24cad2642f2e
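A minimal plain-PyTorch sketch of what the fused cat kernel above assembles for EdgeGCN: pairwise absolute feature differences plus the adjacency entry, flattened to one row per ordered node pair (broadcasting replaces the explicit repeat calls; layer weights are random placeholders).

import torch
import torch.nn as nn

N, in_features, out_features = 4, 4, 4
feat = torch.rand(N, in_features)
adj = torch.rand(N, N)

feat_diff = (feat.unsqueeze(0) - feat.unsqueeze(1)).abs()        # (N, N, in_features)
x = torch.cat((feat_diff, adj.unsqueeze(2)), dim=2).view(N * N, in_features + 1)

fc = nn.Linear(in_features + 1, out_features)                    # bias=True, as in the module
edge_scores = fc(x)                                              # one vector per ordered node pair
print(edge_scores.shape)                                         # torch.Size([16, 4])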
DimReduction
import torch import torch.nn as nn class residual_block(nn.Module): def __init__(self, nChn=512): super(residual_block, self).__init__() self.block = nn.Sequential(nn.Linear(nChn, nChn, bias=False), nn. ReLU(inplace=True), nn.Linear(nChn, nChn, bias=False), nn.ReLU( inplace=True)) def forward(self, x): tt = self.block(x) x = x + tt return x class DimReduction(nn.Module): def __init__(self, n_channels, m_dim=512, numLayer_Res=0): super(DimReduction, self).__init__() self.fc1 = nn.Linear(n_channels, m_dim, bias=False) self.relu1 = nn.ReLU(inplace=True) self.numRes = numLayer_Res self.resBlocks = [] for ii in range(numLayer_Res): self.resBlocks.append(residual_block(m_dim)) self.resBlocks = nn.Sequential(*self.resBlocks) def forward(self, x): x = self.fc1(x) x = self.relu1(x) if self.numRes > 0: x = self.resBlocks(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_view_0(in_out_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp2, None) tl.store(out_ptr1 + x0, tmp4, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_view_0[grid(32768)](buf1, buf2, buf3, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3 class residual_block(nn.Module): def __init__(self, nChn=512): super(residual_block, self).__init__() self.block = nn.Sequential(nn.Linear(nChn, nChn, bias=False), nn. ReLU(inplace=True), nn.Linear(nChn, nChn, bias=False), nn.ReLU( inplace=True)) def forward(self, x): tt = self.block(x) x = x + tt return x class DimReductionNew(nn.Module): def __init__(self, n_channels, m_dim=512, numLayer_Res=0): super(DimReductionNew, self).__init__() self.fc1 = nn.Linear(n_channels, m_dim, bias=False) self.relu1 = nn.ReLU(inplace=True) self.numRes = numLayer_Res self.resBlocks = [] for ii in range(numLayer_Res): self.resBlocks.append(residual_block(m_dim)) self.resBlocks = nn.Sequential(*self.resBlocks) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
hrzhang1123/DTFD-MIL
DimReduction
false
3630
[ "MIT" ]
0
5cf22db83d0c031e69b17d5b668b546940d829bc
https://github.com/hrzhang1123/DTFD-MIL/tree/5cf22db83d0c031e69b17d5b668b546940d829bc
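With numLayer_Res=0 (the configuration exercised by get_init_inputs), DimReduction reduces to a bias-free linear projection followed by ReLU; a minimal sketch with a randomly initialized layer.

import torch
import torch.nn as nn

fc1 = nn.Linear(4, 512, bias=False)              # n_channels=4, m_dim=512
relu1 = nn.ReLU(inplace=True)

x = torch.rand(4, 4, 4, 4)                       # the linear layer acts on the last dimension
out = relu1(fc1(x))
print(out.shape)                                 # torch.Size([4, 4, 4, 512]), matching buf2 in call()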
RNNMLClassification
import torch import torch.nn as nn class RNNMLClassification(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNNMLClassification, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) self.i2o = nn.Linear(input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.softmax = nn.LogSoftmax(dim=1) def forward(self, input, hidden): combined = torch.cat((input, hidden), 1) hidden = self.i2h(combined) output = self.i2o(combined) output = self.o2o(torch.cat((hidden, output), 1)) output = self.softmax(output) return output, hidden def init_hidden(self): return torch.zeros(1, self.hidden_size) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4), (8, 1), 4) extern_kernels.addmm(primals_6, buf0, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf4, (4, 4), (8, 1), 0) triton_poi_fused_cat_1[grid(16)](buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__log_softmax_3[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 return buf7, buf1, buf0, buf4, buf7, primals_7 class RNNMLClassificationNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNNMLClassificationNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.i2h = nn.Linear(input_size + hidden_size, hidden_size) self.i2o = nn.Linear(input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.softmax = nn.LogSoftmax(dim=1) def init_hidden(self): return torch.zeros(1, self.hidden_size) def forward(self, input_0, input_1): primals_3 = self.i2h.weight primals_4 = self.i2h.bias primals_5 = self.i2o.weight primals_6 = self.i2o.bias primals_7 = self.o2o.weight primals_8 = self.o2o.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
hotbaby/kkb-nlp
RNNMLClassification
false
3,631
[ "MIT" ]
0
614cd0f37aa969d21b2fbe3d9f8b2b08db1d0eb1
https://github.com/hotbaby/kkb-nlp/tree/614cd0f37aa969d21b2fbe3d9f8b2b08db1d0eb1
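A minimal parity sketch for the record above, assuming both RNNMLClassification and RNNMLClassificationNew are in scope and a CUDA device is available (the generated call() pins device 0):

import torch

torch.manual_seed(0)
ref = RNNMLClassification(4, 4, 4).cuda()
opt = RNNMLClassificationNew(4, 4, 4).cuda()
opt.load_state_dict(ref.state_dict())  # identical Linear weights in both modules

x = torch.rand(4, 4, device='cuda')
h = torch.rand(4, 4, device='cuda')

out_ref, h_ref = ref(x, h)
out_opt, h_opt = opt(x, h)

# The fused Triton path should reproduce the eager LogSoftmax output and hidden state.
assert torch.allclose(out_ref, out_opt, atol=1e-6)
assert torch.allclose(h_ref, h_opt, atol=1e-6)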
FcCat
import torch
import torch.nn as nn


class FcCat(nn.Module):

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nIn': 4, 'nOut': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class FcCatNew(nn.Module): def __init__(self, nIn, nOut): super(FcCatNew, self).__init__() self.fc = nn.Linear(nIn, nOut, bias=False) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
huangzsdy/pytorch_basic_learning
FcCat
false
3,633
[ "Apache-2.0" ]
0
7880bc3fcee1d38623d93fa2a36482ccde0e335a
https://github.com/huangzsdy/pytorch_basic_learning/tree/7880bc3fcee1d38623d93fa2a36482ccde0e335a
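A small sketch of the shape behaviour in the record above: the Linear acts on the last dimension and the concatenation happens along dim 1, so a [4, 4, 4, 4] input becomes [4, 8, 4, 4] (eager module, CPU is enough):

import torch

m = FcCat(nIn=4, nOut=4)
x = torch.rand(4, 4, 4, 4)
out = m(x)

assert out.shape == (4, 8, 4, 4)     # original channels followed by projected channels
assert torch.equal(out[:, :4], x)    # the first half of dim 1 is the untouched input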
Fadein
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data


class Fadein(nn.Module):

    def __init__(self, cfg):
        super(Fadein, self).__init__()
        self.alpha = 0.0

    def update_alpha(self, delta):
        self.alpha = self.alpha + delta
        self.alpha = max(0, min(self.alpha, 1.0))

    def forward(self, x):
        return torch.add(x[0].mul(1.0 - self.alpha), x[1].mul(self.alpha))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'cfg': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + (64 + x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = 0.0 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class FadeinNew(nn.Module): def __init__(self, cfg): super(FadeinNew, self).__init__() self.alpha = 0.0 def update_alpha(self, delta): self.alpha = self.alpha + delta self.alpha = max(0, min(self.alpha, 1.0)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hyunobae/SRGAN
Fadein
false
3,634
[ "MIT" ]
0
9a967312c08e608833d2037398948617e1200c35
https://github.com/hyunobae/SRGAN/tree/9a967312c08e608833d2037398948617e1200c35
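A short sketch of the blending and the [0, 1] clamping of alpha in the record above (cfg is unused by the module, so passing None here is an assumption):

import torch

fade = Fadein(cfg=None)
x = torch.rand(4, 4, 4, 4)     # x[0] and x[1] are the two tensors being blended

assert torch.allclose(fade(x), x[0])    # alpha == 0.0 -> output is the first input

fade.update_alpha(0.7)
fade.update_alpha(0.7)                  # 0.7 + 0.7 = 1.4 is clamped back to 1.0
assert fade.alpha == 1.0
assert torch.allclose(fade(x), x[1])    # alpha == 1.0 -> output is the second input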
MulMCFC
import collections import torch import torch.utils.data from torch import nn def get_redistribution(kind: 'str', num_states: 'int', num_features: 'int'= None, num_out: 'int'=None, normaliser: 'nn.Module'=None, **kwargs): if kind == 'linear': return LinearRedistribution(num_states, num_features, num_out, normaliser) elif kind == 'outer': return OuterRedistribution(num_states, num_features, num_out, normaliser, **kwargs) elif kind == 'gate': return GateRedistribution(num_states, num_features, num_out, normaliser ) else: raise ValueError('unknown kind of redistribution: {}'.format(kind)) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class NormalisedSigmoid(nn.Module): """ Normalised logistic sigmoid function. 
""" def __init__(self, p: 'float'=1, dim: 'int'=-1): super().__init__() self.p = p self.dim = dim def forward(self, s: 'torch.Tensor') ->torch.Tensor: a = torch.sigmoid(s) return torch.nn.functional.normalize(a, p=self.p, dim=self.dim) class Redistribution(nn.Module): """ Base class for modules that generate redistribution vectors/matrices. """ def __init__(self, num_states: 'int', num_features: 'int'=None, num_out: 'int'=None, normaliser: 'nn.Module'=None): """ Parameters ---------- num_states : int The number of states this redistribution is to be applied on. num_features : int, optional The number of features to use for configuring the redistribution. If the redistribution is not input-dependent, this argument will be ignored. num_out : int, optional The number of outputs to redistribute the states to. If nothing is specified, the redistribution matrix is assumed to be square. normaliser : Module, optional Function to use for normalising the redistribution matrix. """ super().__init__() self.num_features = num_features self.num_states = num_states self.num_out = num_out or num_states self.normaliser = normaliser or NormalisedSigmoid(dim=-1) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: raise NotImplementedError('subclass must implement this method') def forward(self, x: 'torch.Tensor') ->torch.Tensor: r = self._compute(x) return self.normaliser(r) class Gate(Redistribution): """ Classic gate as used in e.g. LSTMs. Notes ----- The vector that is computed by this module gives rise to a diagonal redistribution matrix, i.e. a redistribution matrix that does not really redistribute (not normalised). """ def __init__(self, num_states, num_features, num_out=None, sigmoid=None): super().__init__(num_states, num_features, 1, sigmoid or nn.Sigmoid()) self.fc = nn.Linear(num_features, num_states) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.fc(x) class GateRedistribution(Redistribution): """ Gate-like redistribution that only depends on input. This module directly computes all entries for the redistribution matrix from a linear combination of the input values and is normalised by the activation function. """ def __init__(self, num_states, num_features, num_out=None, normaliser=None ): super().__init__(num_states, num_features, num_out, normaliser) self.fc = nn.Linear(num_features, self.num_states * self.num_out) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) if self.num_states == self.num_out: with torch.no_grad(): self.fc.bias[0::self.num_out + 1] = 3 def _compute(self, x: 'torch.Tensor') ->torch.Tensor: logits = self.fc(x) return logits.view(-1, self.num_states, self.num_out) class LinearRedistribution(Redistribution): """ Redistribution by normalising a learned matrix. This module has an unnormalised version of the redistribution matrix as parameters and is normalised by applying a non-linearity (the normaliser). The redistribution does not depend on any of the input values, but is updated using the gradients to fit the data. 
""" def __init__(self, num_states, num_features=0, num_out=None, normaliser =None): super(LinearRedistribution, self).__init__(num_states, 0, num_out, normaliser) self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=True) self.reset_parameters() @torch.no_grad() def reset_parameters(self): if self.num_states == self.num_out: nn.init.eye_(self.r) if type(self.normaliser) is NormalisedSigmoid: torch.mul(self.r, 2, out=self.r) torch.sub(self.r, 1, out=self.r) else: nn.init.orthogonal_(self.r) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.r.unsqueeze(0) class OuterRedistribution(Redistribution): """ Redistribution by (weighted) outer product of two input-dependent vectors. This module computes the entries for the redistribution matrix as the outer product of two vectors that are linear combinations of the input values. There is an option to include a weight matrix parameter to weight each entry in the resulting matrix, which is then normalised using a non-linearity. The weight matrix parameter is updated through the gradients to fit the data. """ def __init__(self, num_states, num_features, num_out=None, normaliser= None, weighted: 'bool'=False): """ Parameters ---------- weighted : bool, optional Whether or not to use a weighted outer product. """ super(OuterRedistribution, self).__init__(num_states, num_features, num_out, normaliser) self.weighted = weighted self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=weighted) self.fc1 = nn.Linear(num_features, self.num_states) self.fc2 = nn.Linear(num_features, self.num_out) self.phi = lambda x: x self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.r) nn.init.orthogonal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) nn.init.orthogonal_(self.fc2.weight) nn.init.zeros_(self.fc2.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: a1 = self.phi(self.fc1(x)) a2 = self.phi(self.fc2(x)) outer = a1.unsqueeze(-1) * a2.unsqueeze(-2) if self.weighted: outer *= self.r return outer class MCFullyConnected(ExtendedTorchModule): def __init__(self, in_features: 'int', out_features: 'int', **kwargs): super().__init__('MCFC', **kwargs) self.mass_input_size = in_features self.aux_input_size = 1 self.hidden_size = out_features self.normaliser = nn.Softmax(dim=-1) self.out_gate = Gate(self.hidden_size, self.aux_input_size) self.junction = get_redistribution('linear', num_states=self. 
mass_input_size, num_features=self.aux_input_size, num_out=self .hidden_size, normaliser=self.normaliser) @torch.no_grad() def reset_parameters(self): self.out_gate.reset_parameters() self.junction.reset_parameters() def log_gradients(self): for name, parameter in self.named_parameters(): gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) def regualizer(self, merge_in=None): r1 = -torch.mean(self.junction.r ** 2) r2 = -torch.mean(self.out_gate.fc.weight ** 2) r3 = -torch.mean(self.out_gate.fc.bias ** 2) return super().regualizer({'W': r1 + r2 + r3}) def forward(self, x): x_m, x_a = x, x.new_ones(1) j = self.junction(x_a) o = self.out_gate(x_a) m_in = torch.matmul(x_m.unsqueeze(-2), j).squeeze(-2) return o * m_in class MulMCFC(ExtendedTorchModule): def __init__(self, in_features, out_features, **kwargs): super().__init__('MulMCFC', **kwargs) self.mcfc = MCFullyConnected(in_features, out_features + 1, **kwargs) self.bias = nn.Parameter(torch.zeros(out_features)) def reset_parameters(self): self.mcfc.reset_parameters() nn.init.zeros_(self.bias) def forward(self, x): log_sum = self.mcfc(torch.log(x))[:, :-1] return torch.exp(log_sum + self.bias) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import collections import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp0 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tmp1 - tmp8 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp8 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 5 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp3 / tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_view_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = 1.0 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.log(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_add_exp_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x0 + 5 * x1), xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tmp6 = tl_math.exp(tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() 
assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 5), (5, 1)) assert_size_stride(primals_3, (5, 1), (1, 1)) assert_size_stride(primals_4, (5,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(4)](primals_2, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((1, 4, 5), (20, 5, 1), torch.float32) triton_poi_fused__softmax_1[grid(20)](primals_2, buf0, buf1, buf2, 20, XBLOCK=32, num_warps=1, num_stages=1) del buf0 del buf1 del primals_2 buf3 = empty_strided_cuda((1, 1), (1, 1), torch.float32) triton_poi_fused_view_2[grid(1)](buf3, 1, XBLOCK=1, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((1, 5), (5, 1), torch.float32) extern_kernels.addmm(primals_4, buf3, reinterpret_tensor(primals_3, (1, 5), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_3 del primals_4 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_view_3[grid(16)](primals_1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf6 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(buf2, (4, 5), (5, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_exp_4[grid(16)](buf4, buf6, primals_5, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf7, buf2, buf3, buf4, buf6, buf7, reinterpret_tensor(buf5, (4, 4), (1, 4), 0) def get_redistribution(kind: 'str', num_states: 'int', num_features: 'int'= None, num_out: 'int'=None, normaliser: 'nn.Module'=None, **kwargs): if kind == 'linear': return LinearRedistribution(num_states, num_features, num_out, normaliser) elif kind == 'outer': return OuterRedistribution(num_states, num_features, num_out, normaliser, **kwargs) elif kind == 'gate': return GateRedistribution(num_states, num_features, num_out, normaliser ) else: raise ValueError('unknown kind of redistribution: {}'.format(kind)) class SummaryWriterNamespaceNoLoggingScope: def __init__(self, writer): self._writer = writer def __enter__(self): self._writer._logging_enabled = False def __exit__(self, type, value, traceback): self._writer._logging_enabled = True return False class DummySummaryWriter: def __init__(self, **kwargs): self._logging_enabled = False pass def add_scalar(self, name, value, verbose_only=True): pass def add_summary(self, name, tensor, verbose_only=True): pass def add_histogram(self, name, tensor, verbose_only=True): pass def add_tensor(self, name, tensor, verbose_only=True): pass def print(self, name, tensor, verbose_only=True): pass def namespace(self, name): return self def every(self, epoch_interval): return self def verbose(self, verbose): return self def no_logging(self): return SummaryWriterNamespaceNoLoggingScope(self) class NoRandomScope: def __init__(self, module): self._module = module def __enter__(self): self._module._disable_random() def __exit__(self, type, value, traceback): self._module._enable_random() return False class ExtendedTorchModule(torch.nn.Module): def __init__(self, default_name, *args, writer=None, name=None, **kwargs): super().__init__() if writer is None: writer = DummySummaryWriter() self.writer = writer.namespace(default_name if name is None else name) self.allow_random = True def set_parameter(self, name, value): parameter = getattr(self, name, None) if 
isinstance(parameter, torch.nn.Parameter): parameter.fill_(value) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.set_parameter(name, value) def regualizer(self, merge_in=None): regualizers = collections.defaultdict(int) if merge_in is not None: for key, value in merge_in.items(): self.writer.add_scalar(f'regualizer/{key}', value) regualizers[key] += value for module in self.children(): if isinstance(module, ExtendedTorchModule): for key, value in module.regualizer().items(): regualizers[key] += value return regualizers def optimize(self, loss): for module in self.children(): if isinstance(module, ExtendedTorchModule): module.optimize(loss) def log_gradients(self): for name, parameter in self.named_parameters(recurse=False): if parameter.requires_grad: gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) for module in self.children(): if isinstance(module, ExtendedTorchModule): module.log_gradients() def no_internal_logging(self): return self.writer.no_logging() def _disable_random(self): self.allow_random = False for module in self.children(): if isinstance(module, ExtendedTorchModule): module._disable_random() def _enable_random(self): self.allow_random = True for module in self.children(): if isinstance(module, ExtendedTorchModule): module._enable_random() def no_random(self): return NoRandomScope(self) class NormalisedSigmoid(nn.Module): """ Normalised logistic sigmoid function. """ def __init__(self, p: 'float'=1, dim: 'int'=-1): super().__init__() self.p = p self.dim = dim def forward(self, s: 'torch.Tensor') ->torch.Tensor: a = torch.sigmoid(s) return torch.nn.functional.normalize(a, p=self.p, dim=self.dim) class Redistribution(nn.Module): """ Base class for modules that generate redistribution vectors/matrices. """ def __init__(self, num_states: 'int', num_features: 'int'=None, num_out: 'int'=None, normaliser: 'nn.Module'=None): """ Parameters ---------- num_states : int The number of states this redistribution is to be applied on. num_features : int, optional The number of features to use for configuring the redistribution. If the redistribution is not input-dependent, this argument will be ignored. num_out : int, optional The number of outputs to redistribute the states to. If nothing is specified, the redistribution matrix is assumed to be square. normaliser : Module, optional Function to use for normalising the redistribution matrix. """ super().__init__() self.num_features = num_features self.num_states = num_states self.num_out = num_out or num_states self.normaliser = normaliser or NormalisedSigmoid(dim=-1) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: raise NotImplementedError('subclass must implement this method') def forward(self, x: 'torch.Tensor') ->torch.Tensor: r = self._compute(x) return self.normaliser(r) class Gate(Redistribution): """ Classic gate as used in e.g. LSTMs. Notes ----- The vector that is computed by this module gives rise to a diagonal redistribution matrix, i.e. a redistribution matrix that does not really redistribute (not normalised). 
""" def __init__(self, num_states, num_features, num_out=None, sigmoid=None): super().__init__(num_states, num_features, 1, sigmoid or nn.Sigmoid()) self.fc = nn.Linear(num_features, num_states) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.fc(x) class GateRedistribution(Redistribution): """ Gate-like redistribution that only depends on input. This module directly computes all entries for the redistribution matrix from a linear combination of the input values and is normalised by the activation function. """ def __init__(self, num_states, num_features, num_out=None, normaliser=None ): super().__init__(num_states, num_features, num_out, normaliser) self.fc = nn.Linear(num_features, self.num_states * self.num_out) self.reset_parameters() def reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) if self.num_states == self.num_out: with torch.no_grad(): self.fc.bias[0::self.num_out + 1] = 3 def _compute(self, x: 'torch.Tensor') ->torch.Tensor: logits = self.fc(x) return logits.view(-1, self.num_states, self.num_out) class LinearRedistribution(Redistribution): """ Redistribution by normalising a learned matrix. This module has an unnormalised version of the redistribution matrix as parameters and is normalised by applying a non-linearity (the normaliser). The redistribution does not depend on any of the input values, but is updated using the gradients to fit the data. """ def __init__(self, num_states, num_features=0, num_out=None, normaliser =None): super(LinearRedistribution, self).__init__(num_states, 0, num_out, normaliser) self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=True) self.reset_parameters() @torch.no_grad() def reset_parameters(self): if self.num_states == self.num_out: nn.init.eye_(self.r) if type(self.normaliser) is NormalisedSigmoid: torch.mul(self.r, 2, out=self.r) torch.sub(self.r, 1, out=self.r) else: nn.init.orthogonal_(self.r) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: return self.r.unsqueeze(0) class OuterRedistribution(Redistribution): """ Redistribution by (weighted) outer product of two input-dependent vectors. This module computes the entries for the redistribution matrix as the outer product of two vectors that are linear combinations of the input values. There is an option to include a weight matrix parameter to weight each entry in the resulting matrix, which is then normalised using a non-linearity. The weight matrix parameter is updated through the gradients to fit the data. """ def __init__(self, num_states, num_features, num_out=None, normaliser= None, weighted: 'bool'=False): """ Parameters ---------- weighted : bool, optional Whether or not to use a weighted outer product. 
""" super(OuterRedistribution, self).__init__(num_states, num_features, num_out, normaliser) self.weighted = weighted self.r = nn.Parameter(torch.empty(self.num_states, self.num_out), requires_grad=weighted) self.fc1 = nn.Linear(num_features, self.num_states) self.fc2 = nn.Linear(num_features, self.num_out) self.phi = lambda x: x self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.r) nn.init.orthogonal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) nn.init.orthogonal_(self.fc2.weight) nn.init.zeros_(self.fc2.bias) def _compute(self, x: 'torch.Tensor') ->torch.Tensor: a1 = self.phi(self.fc1(x)) a2 = self.phi(self.fc2(x)) outer = a1.unsqueeze(-1) * a2.unsqueeze(-2) if self.weighted: outer *= self.r return outer class MCFullyConnected(ExtendedTorchModule): def __init__(self, in_features: 'int', out_features: 'int', **kwargs): super().__init__('MCFC', **kwargs) self.mass_input_size = in_features self.aux_input_size = 1 self.hidden_size = out_features self.normaliser = nn.Softmax(dim=-1) self.out_gate = Gate(self.hidden_size, self.aux_input_size) self.junction = get_redistribution('linear', num_states=self. mass_input_size, num_features=self.aux_input_size, num_out=self .hidden_size, normaliser=self.normaliser) @torch.no_grad() def reset_parameters(self): self.out_gate.reset_parameters() self.junction.reset_parameters() def log_gradients(self): for name, parameter in self.named_parameters(): gradient, *_ = parameter.grad.data self.writer.add_summary(f'{name}/grad', gradient) self.writer.add_histogram(f'{name}/grad', gradient) def regualizer(self, merge_in=None): r1 = -torch.mean(self.junction.r ** 2) r2 = -torch.mean(self.out_gate.fc.weight ** 2) r3 = -torch.mean(self.out_gate.fc.bias ** 2) return super().regualizer({'W': r1 + r2 + r3}) def forward(self, x): x_m, x_a = x, x.new_ones(1) j = self.junction(x_a) o = self.out_gate(x_a) m_in = torch.matmul(x_m.unsqueeze(-2), j).squeeze(-2) return o * m_in class MulMCFCNew(ExtendedTorchModule): def __init__(self, in_features, out_features, **kwargs): super().__init__('MulMCFC', **kwargs) self.mcfc = MCFullyConnected(in_features, out_features + 1, **kwargs) self.bias = nn.Parameter(torch.zeros(out_features)) def reset_parameters(self): self.mcfc.reset_parameters() nn.init.zeros_(self.bias) def forward(self, input_0): primals_5 = self.bias primals_3 = self.mcfc.out_gate.fc.weight primals_4 = self.mcfc.out_gate.fc.bias primals_2 = self.mcfc.junction.r primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
hoedt/stable-nalu
MulMCFC
false
3,635
[ "MIT" ]
0
64b3d240db8bff4da857d955f213ef3c7e38e035
https://github.com/hoedt/stable-nalu/tree/64b3d240db8bff4da857d955f213ef3c7e38e035
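A brief sketch of the multiplicative reading of the unit above: the mass-conserving layer operates on log(x), so its weighted sums become products of input powers, and for strictly positive inputs the exponentiated output stays strictly positive:

import torch

torch.manual_seed(0)
m = MulMCFC(in_features=4, out_features=4)
x = torch.rand(4, 4) + 0.1      # keep inputs strictly positive so log() is finite

out = m(x)
assert out.shape == (4, 4)
assert torch.all(out > 0)       # exp() of a finite log-space combination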
LinearPool
import torch
from torch import nn


class LinearPool(nn.Module):

    def __init__(self):
        super(LinearPool, self).__init__()

    def forward(self, feat_map):
        """
        Arguments:
            feat_map(Tensor): tensor with shape (N, C, H, W)
            return(Tensor): tensor with shape (N, C, 1, 1)
        """
        EPSILON = 1e-07
        _N, _C, _H, _W = feat_map.shape
        sum_input = torch.sum(feat_map, dim=(-1, -2), keepdim=True)
        sum_input += EPSILON
        linear_weight = feat_map / sum_input
        weighted_value = feat_map * linear_weight
        return torch.sum(weighted_value, dim=(-1, -2), keepdim=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 1e-07
    tmp6 = tmp4 + tmp5
    tmp7 = tmp0 / tmp6
    tmp8 = tmp0 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tl.store(in_out_ptr0 + x0, tmp12, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_div_mul_sum_0[grid(16)](buf1, arg0_1, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class LinearPoolNew(nn.Module):

    def __init__(self):
        super(LinearPoolNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
iampartho/EEE426
LinearPool
false
3,636
[ "Apache-2.0" ]
0
a706660c0efcd4adea44d54c57a34bcaa4439ec1
https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1
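A brief sketch of the closed form this pooling reduces to, the sum of squares divided by the sum (plus epsilon) over each (N, C) slice:

import torch

pool = LinearPool()
x = torch.rand(4, 4, 4, 4)

out = pool(x)                    # shape (4, 4, 1, 1)
expected = x.pow(2).sum(dim=(-1, -2), keepdim=True) / (
    x.sum(dim=(-1, -2), keepdim=True) + 1e-07)
assert torch.allclose(out, expected, atol=1e-6)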
LayerNormChannel
import torch
import torch.nn as nn


class LayerNormChannel(nn.Module):
    """
    LayerNorm only for Channel Dimension.
    Input: tensor in shape [B, C, H, W]
    """

    def __init__(self, num_channels, eps=1e-05):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x):
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight.unsqueeze(-1).unsqueeze(-1) * x + self.bias.unsqueeze(
            -1).unsqueeze(-1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_2, buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class LayerNormChannelNew(nn.Module): """ LayerNorm only for Channel Dimension. Input: tensor in shape [B, C, H, W] """ def __init__(self, num_channels, eps=1e-05): super().__init__() self.weight = nn.Parameter(torch.ones(num_channels)) self.bias = nn.Parameter(torch.zeros(num_channels)) self.eps = eps def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
hyenal/tensorflow-image-models
LayerNormChannel
false
3,637
[ "Apache-2.0" ]
0
2012be8ecc7bc23e84dc2488d3e4fe1c80dbfb2c
https://github.com/hyenal/tensorflow-image-models/tree/2012be8ecc7bc23e84dc2488d3e4fe1c80dbfb2c
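A sketch of an equivalence worth knowing for the record above, stated as an assumption about nn.LayerNorm's conventions (biased variance, eps inside the square root): the module matches a standard LayerNorm over the channel dimension in channels-last layout:

import torch
import torch.nn as nn

ln_c = LayerNormChannel(num_channels=4)
ln = nn.LayerNorm(4, eps=1e-05)
with torch.no_grad():
    ln.weight.copy_(ln_c.weight)    # both start as ones/zeros; copied for clarity
    ln.bias.copy_(ln_c.bias)

x = torch.rand(4, 4, 4, 4)          # [B, C, H, W]
ref = ln(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
assert torch.allclose(ln_c(x), ref, atol=1e-6)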
GAT
from torch.nn import Module import torch from torch.nn.modules.module import Module import torch.nn as nn import torch.nn.functional as F class EdgeGCN(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, include_adj=True, bias=True): super(EdgeGCN, self).__init__() self.include_adj = include_adj self.in_features = in_features + 1 if self.include_adj else in_features self.out_features = out_features self.fc = nn.Linear(self.in_features, self.out_features, bias=bias) def forward(self, feat, adj): feat_diff = (feat.unsqueeze(0).repeat(feat.shape[0], 1, 1) - feat. unsqueeze(1).repeat(1, feat.shape[0], 1)).abs() if self.include_adj: x = torch.cat((feat_diff, adj.unsqueeze(2)), 2).view(feat.shape [0] * feat.shape[0], -1) else: x = feat_diff.view(feat.shape[0] * feat.shape[0], -1) output = self.fc(x) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout=0, alpha=0.2, concat=True): super(GATLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] a_input = (h.unsqueeze(0).repeat(h.shape[0], 1, 1) - h.unsqueeze(1) .repeat(1, h.shape[0], 1)).abs() e = F.relu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h = torch.matmul(attention, h) if self.concat: return F.relu(h) else: return h def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nclass): super(GAT, self).__init__() self.gat1 = GATLayer(nfeat, 128) self.gat2 = GATLayer(128, 128) self.gat3 = GATLayer(128, 128) self.edge_gc = EdgeGCN(128, nclass, include_adj=False) def forward(self, feat, adj): x = self.gat1(feat, adj) x = self.gat2(x, adj) x = self.gat3(x, adj) a = self.edge_gc(x, adj) return a def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nclass': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module from torch.nn.modules.module import Module import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_abs_repeat_sgn_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex % 512 x0 = xindex % 128 x2 = xindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x0 + 128 * x2), None, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.full([1], 0, tl.int32) tmp5 = tmp4 < tmp2 tmp6 = tmp5.to(tl.int8) tmp7 = tmp2 < tmp4 tmp8 = tmp7.to(tl.int8) tmp9 = tmp6 - tmp8 tmp10 = tmp9.to(tmp2.dtype) tl.store(out_ptr0 + x4, tmp3, None) tl.store(out_ptr1 + x4, tmp10, None) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_mul_relu_where_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = -8999999815811072.0 tmp5 = tl.where(tmp0, tmp3, tmp4) tmp8 = triton_helpers.maximum(tmp2, tmp7) tmp9 = tl.where(tmp6, tmp8, tmp4) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = triton_helpers.maximum(tmp2, tmp12) tmp14 = tl.where(tmp11, tmp13, tmp4) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = triton_helpers.maximum(tmp2, tmp17) tmp19 = tl.where(tmp16, tmp18, tmp4) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_mul_relu_threshold_backward_where_3(in_ptr0, 
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = -8999999815811072.0 tmp5 = tl.where(tmp0, tmp3, tmp4) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tmp3 <= tmp11 tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_abs_repeat_sgn_sub_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex % 512 x0 = xindex % 128 x2 = xindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (x0 + 128 * x2), None, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp1 < tmp5 tmp8 = tmp7.to(tl.int8) tmp9 = tmp5 < tmp1 tmp10 = tmp9.to(tl.int8) tmp11 = tmp8 - tmp10 tmp12 = tmp11.to(tmp5.dtype) tl.store(out_ptr0 + x4, tmp6, None) tl.store(out_ptr1 + x4, tmp12, None) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 128), (128, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (128, 1), (1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (128, 128), (128, 1)) assert_size_stride(primals_6, (128, 1), (1, 1)) assert_size_stride(primals_7, (128, 128), (128, 1)) assert_size_stride(primals_8, (128, 1), (1, 1)) assert_size_stride(primals_9, (4, 128), (128, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) buf33 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_repeat_sgn_sub_0[grid(2048)](buf0, buf1, buf33, 2048, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 128), (128, 1), 0), 
primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_mul_relu_where_2[grid(4)](buf3, buf2, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused__softmax_mul_relu_threshold_backward_where_3[grid(16) ](buf3, buf2, buf4, buf5, buf6, buf32, 16, XBLOCK=16, num_warps =1, num_stages=1) buf7 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf6, buf0, out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_4[grid(512)](buf8, 512, XBLOCK=256, num_warps =4, num_stages=1) buf9 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf8, primals_5, out=buf9) buf10 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) buf31 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) triton_poi_fused_abs_repeat_sgn_sub_0[grid(2048)](buf9, buf10, buf31, 2048, XBLOCK=128, num_warps=4, num_stages=1) buf11 = buf2 del buf2 extern_kernels.mm(reinterpret_tensor(buf10, (16, 128), (128, 1), 0), primals_6, out=buf11) buf12 = buf5 del buf5 buf13 = buf4 del buf4 triton_poi_fused__softmax_mul_relu_where_2[grid(4)](buf3, buf11, buf12, buf13, 4, XBLOCK=4, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused__softmax_mul_relu_threshold_backward_where_3[grid(16) ](buf3, buf11, buf12, buf13, buf14, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf14, buf9, out=buf15) buf16 = buf15 del buf15 triton_poi_fused_relu_4[grid(512)](buf16, 512, XBLOCK=256, num_warps=4, num_stages=1) buf17 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf16, primals_7, out=buf17) buf18 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) buf29 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) triton_poi_fused_abs_repeat_sgn_sub_0[grid(2048)](buf17, buf18, buf29, 2048, XBLOCK=128, num_warps=4, num_stages=1) buf19 = buf11 del buf11 extern_kernels.mm(reinterpret_tensor(buf18, (16, 128), (128, 1), 0), primals_8, out=buf19) buf20 = buf13 del buf13 buf21 = buf12 del buf12 triton_poi_fused__softmax_mul_relu_where_2[grid(4)](buf3, buf19, buf20, buf21, 4, XBLOCK=4, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused__softmax_mul_relu_threshold_backward_where_3[grid(16) ](buf3, buf19, buf20, buf21, buf22, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf19 del buf20 del buf21 buf23 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf22, buf17, out=buf23) buf24 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) buf26 = empty_strided_cuda((4, 4, 128), (512, 128, 1), torch.float32) triton_poi_fused_abs_repeat_sgn_sub_5[grid(2048)](buf23, buf24, buf26, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf24, (16, 128 ), (128, 1), 0), reinterpret_tensor(primals_9, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf25) del primals_10 
buf27 = empty_strided_cuda((4, 128), (128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_6[grid(512)](buf23, buf27, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf23 return buf25, buf3, buf6, buf8, buf14, buf16, buf22, reinterpret_tensor( buf24, (16, 128), (128, 1), 0 ), primals_9, buf26, buf27, reinterpret_tensor(buf17, (128, 4), (1, 128), 0), buf28, reinterpret_tensor(buf18, (128, 16), (1, 128), 0 ), reinterpret_tensor(primals_8, (1, 128), (1, 1), 0 ), buf29, reinterpret_tensor(primals_7, (128, 128), (1, 128), 0 ), reinterpret_tensor(buf9, (128, 4), (1, 128), 0 ), buf30, reinterpret_tensor(buf10, (128, 16), (1, 128), 0 ), reinterpret_tensor(primals_6, (1, 128), (1, 1), 0 ), buf31, reinterpret_tensor(primals_5, (128, 128), (1, 128), 0 ), reinterpret_tensor(buf0, (128, 4), (1, 128), 0 ), buf32, reinterpret_tensor(buf1, (128, 16), (1, 128), 0 ), reinterpret_tensor(primals_3, (1, 128), (1, 1), 0 ), buf33, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class EdgeGCN(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, include_adj=True, bias=True): super(EdgeGCN, self).__init__() self.include_adj = include_adj self.in_features = in_features + 1 if self.include_adj else in_features self.out_features = out_features self.fc = nn.Linear(self.in_features, self.out_features, bias=bias) def forward(self, feat, adj): feat_diff = (feat.unsqueeze(0).repeat(feat.shape[0], 1, 1) - feat. unsqueeze(1).repeat(1, feat.shape[0], 1)).abs() if self.include_adj: x = torch.cat((feat_diff, adj.unsqueeze(2)), 2).view(feat.shape [0] * feat.shape[0], -1) else: x = feat_diff.view(feat.shape[0] * feat.shape[0], -1) output = self.fc(x) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout=0, alpha=0.2, concat=True): super(GATLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.a = nn.Parameter(torch.zeros(size=(out_features, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] a_input = (h.unsqueeze(0).repeat(h.shape[0], 1, 1) - h.unsqueeze(1) .repeat(1, h.shape[0], 1)).abs() e = F.relu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h = torch.matmul(attention, h) if self.concat: return F.relu(h) else: return h def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nclass): super(GATNew, self).__init__() self.gat1 = GATLayer(nfeat, 128) self.gat2 = GATLayer(128, 128) self.gat3 = GATLayer(128, 128) self.edge_gc = EdgeGCN(128, nclass, include_adj=False) def forward(self, input_0, input_1): primals_1 = self.gat1.W primals_3 = self.gat1.a primals_5 = self.gat2.W primals_6 = self.gat2.a primals_7 = self.gat3.W primals_8 = self.gat3.a primals_9 = self.edge_gc.fc.weight primals_10 = self.edge_gc.fc.bias 
primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
hou-yz/pygcn
GAT
false
3638
[ "MIT" ]
0
26195954035c5eaae2d6e086cfec24cad2642f2e
https://github.com/hou-yz/pygcn/tree/26195954035c5eaae2d6e086cfec24cad2642f2e
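Illustrative sketch, not one of the dataset fields: a standalone re-derivation of the pairwise |h_i - h_j| attention that GATLayer above computes, with made-up weight shapes, so the fused softmax/where kernels in the optimised code are easier to follow.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
feat = torch.rand(4, 4)                         # 4 nodes, 4 input features
adj = (torch.rand(4, 4) > 0.5).float()          # random adjacency
W = torch.rand(4, 8)                            # illustrative weight shapes
a = torch.rand(8, 1)

h = feat @ W                                    # node embeddings
pair = (h.unsqueeze(0).repeat(h.shape[0], 1, 1)
        - h.unsqueeze(1).repeat(1, h.shape[0], 1)).abs()      # |h_i - h_j|
e = F.relu(torch.matmul(pair, a).squeeze(2))
attention = torch.where(adj > 0, e, torch.full_like(e, -9e15))
attention = F.softmax(attention, dim=1)
out = attention @ h                             # aggregated features, shape (4, 8)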
InvConv2d
import torch from torch import nn from torch.nn import functional as F class InvConv2d(nn.Module): def __init__(self, in_channel): super().__init__() weight = torch.randn(in_channel, in_channel) q, _ = torch.qr(weight) weight = q.unsqueeze(2).unsqueeze(3) self.weight = nn.Parameter(weight) def forward(self, input): _, _, height, width = input.shape out = F.conv2d(input, self.weight) logdet = height * width * torch.slogdet(self.weight.squeeze().double() )[1].float() return out, logdet def reverse(self, output): return F.conv2d(output, self.weight.squeeze().inverse().unsqueeze(2 ).unsqueeze(3)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tmp1.to(tl.float32) tmp3 = 16.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (1, 4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(4, 4)](primals_2, buf0, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(primals_1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) del buf0 buf2 = empty_strided_cuda((4, 4), (1, 4), torch.float64) triton_poi_fused__to_copy_1[grid(16)](primals_2, buf2, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf3 = torch.ops.aten._linalg_slogdet.default(buf2) del buf2 buf5 = buf3[1] buf6 = buf3[2] buf7 = buf3[3] del buf3 buf8 = empty_strided_cuda((), (), torch.float32) triton_poi_fused__to_copy_mul_2[grid(1)](buf5, buf8, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf5 return buf1, buf8, primals_1, primals_2, buf6, buf7 class InvConv2dNew(nn.Module): def __init__(self, in_channel): super().__init__() weight = torch.randn(in_channel, in_channel) q, _ = torch.qr(weight) weight = q.unsqueeze(2).unsqueeze(3) self.weight = nn.Parameter(weight) def reverse(self, output): return F.conv2d(output, self.weight.squeeze().inverse().unsqueeze(2 ).unsqueeze(3)) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
hologerry/glow-pytorch-1
InvConv2d
false
3639
[ "MIT" ]
0
9d3f95f4ff7f0a1361796a9b2554e3c229aad9b7
https://github.com/hologerry/glow-pytorch-1/tree/9d3f95f4ff7f0a1361796a9b2554e3c229aad9b7
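Illustrative round-trip check, not one of the dataset fields (it assumes the InvConv2d class from the row above is in scope): reverse() should undo forward(), and logdet should equal H*W times the weight's log-determinant.
import torch

torch.manual_seed(0)
m = InvConv2d(4)
x = torch.rand(4, 4, 4, 4)
out, logdet = m(x)
x_rec = m.reverse(out)
print(torch.allclose(x, x_rec, atol=1e-5))   # True: 1x1 conv with W then W^-1 is the identity
expected = 4 * 4 * torch.slogdet(m.weight.squeeze().double())[1].float()
print(torch.allclose(logdet, expected))      # True: height * width * log|det W|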
ExpPool
import torch from torch import nn class ExpPool(nn.Module): def __init__(self): super(ExpPool, self).__init__() def forward(self, feat_map): """ Numerically stable implementation of the operation Arguments: feat_map(Tensor): tensor with shape (N, C, H, W) return(Tensor): tensor with shape (N, C, 1, 1) """ EPSILON = 1e-07 _N, _C, _H, _W = feat_map.shape m, _ = torch.max(feat_map, dim=-1, keepdim=True)[0].max(dim=-2, keepdim=True) sum_exp = torch.sum(torch.exp(feat_map - m), dim=(-1, -2), keepdim=True ) sum_exp += EPSILON exp_weight = torch.exp(feat_map - m) / sum_exp weighted_value = feat_map * exp_weight return torch.sum(weighted_value, dim=(-1, -2), keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_exp_max_mul_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tmp32 = tmp31 - tmp30 tmp33 = tl_math.exp(tmp32) tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.where(xmask, tmp34, 0) tmp37 = tl.sum(tmp36, 1)[:, None] tmp38 = 1e-07 tmp39 = tmp37 + tmp38 tmp40 = tmp33 / tmp39 tmp41 = tmp31 * tmp40 tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp44 = tl.where(xmask, tmp42, 0) tmp45 = tl.sum(tmp44, 1)[:, None] tl.store(in_out_ptr0 + x0, tmp45, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 
1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused_add_div_exp_max_mul_sub_sum_0[grid(16)](buf2, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class ExpPoolNew(nn.Module): def __init__(self): super(ExpPoolNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
iampartho/EEE426
ExpPool
false
3640
[ "Apache-2.0" ]
0
a706660c0efcd4adea44d54c57a34bcaa4439ec1
https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1
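Illustrative equivalence check, not one of the dataset fields (it assumes the ExpPool class above is in scope): the pooled value is a softmax-weighted spatial average, so it should match torch.softmax applied over the flattened H*W axis, up to the EPSILON term.
import torch

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)
pooled = ExpPool()(x)                                         # (4, 4, 1, 1)
flat = x.view(4, 4, -1)
ref = (flat * torch.softmax(flat, dim=-1)).sum(-1, keepdim=True).unsqueeze(-1)
print(torch.allclose(pooled, ref, atol=1e-5))                 # True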
CNNCifar
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.nn.functional as F class CNNCifar(nn.Module): def __init__(self, args): super(CNNCifar, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, args.num_classes) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {'args': _mock_config(num_classes=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > 
tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (4, 84), (84, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 4), 
(1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_6[grid(16)](buf12, buf13, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf14 = buf12 del buf12 triton_poi_fused__log_softmax_7[grid(16)](buf13, buf14, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf13 return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, buf14, primals_10, primals_8, primals_6) class CNNCifarNew(nn.Module): def __init__(self, args): super(CNNCifarNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, args.num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
EugeneYuZ/RL-FL
CNNCifar
false
3641
[ "MIT" ]
0
cb4cc2a17eda1dbf60d696e361f31e433d8dbdea
https://github.com/EugeneYuZ/RL-FL/tree/cb4cc2a17eda1dbf60d696e361f31e433d8dbdea
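Illustrative shape check, not one of the dataset fields: it assumes the CNNCifar class from the row above is importable, and SimpleNamespace stands in for the paritybench _mock_config helper used by get_init_inputs.
import torch
from types import SimpleNamespace

net = CNNCifar(SimpleNamespace(num_classes=4))
x = torch.rand(4, 3, 32, 32)
logp = net(x)
print(logp.shape)                                             # torch.Size([4, 4])
print(torch.allclose(logp.exp().sum(dim=1), torch.ones(4)))   # log-probabilities sum to 1 per row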
Pooling
import torch import torch.nn as nn class Pooling(nn.Module): """ Implementation of pooling for PoolFormer --pool_size: pooling size """ def __init__(self, pool_size=3): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, x): return self.pool(x) - x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex tmp54 = tl.load(in_ptr0 + x3, xmask) tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=0.0) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x3), tmp16 & xmask, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x3), tmp23 & xmask, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x3), tmp30 & xmask, other=0.0) tmp32 = tmp31 + tmp25 tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x3, tmp33 & xmask, other=0.0) tmp35 = tmp34 + tmp32 tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x3), tmp36 & xmask, other=0.0) tmp38 = tmp37 + tmp35 tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x3), tmp43 & xmask, other=0.0) tmp45 = tmp44 + tmp38 tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x3), tmp46 & xmask, other=0.0) tmp48 = tmp47 + tmp45 tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x3), tmp49 & xmask, other=0.0) tmp51 = tmp50 + tmp48 tmp52 = (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= - 1 + x1) + (-1 + x1) * (-1 + x1 > 0)) + (4 * (4 <= 2 + x0) + (2 + x0 ) * (2 + x0 < 4)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4) ) + -1 * (0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (4 * (4 <= 2 + x1) + (2 + x1) * (2 + x1 < 4)) + -1 * (0 * (0 >= -1 + x1) + (-1 + x1) * (-1 + x1 > 0)) * (4 * (4 <= 2 + x0) + (2 + x0) * (2 + x0 < 4)) tmp53 = tmp51 / tmp52 tmp55 = tmp53 - tmp54 tl.store(in_out_ptr0 + x3, tmp55, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_avg_pool2d_sub_0[grid(256)](buf1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf1, class PoolingNew(nn.Module): """ Implementation of pooling for PoolFormer --pool_size: pooling size """ def __init__(self, pool_size=3): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hyenal/tensorflow-image-models
Pooling
false
3642
[ "Apache-2.0" ]
0
2012be8ecc7bc23e84dc2488d3e4fe1c80dbfb2c
https://github.com/hyenal/tensorflow-image-models/tree/2012be8ecc7bc23e84dc2488d3e4fe1c80dbfb2c
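Illustrative sanity check, not one of the dataset fields (it assumes the Pooling class above is in scope): the PoolFormer token mixer returns avg_pool(x) - x, so it is exactly zero on a spatially constant input.
import torch

x = torch.ones(2, 3, 8, 8)
print(torch.all(Pooling()(x) == 0))   # tensor(True): the average of a constant equals the constant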
ExtResNetBlock
import torch from torch import nn def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): """ Create a list of modules with together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels order (string): order of things, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int): add zero-padding to the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'crg', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlock(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlock, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, x): out = self.conv1(x) residual = out out = self.conv2(out) out = self.conv3(out) out += residual out = self.non_linearity(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 0.0 tmp29 = tmp27 > tmp28 tmp30 = 1.0 tmp31 = tmp27 * tmp30 tmp32 = libdevice.expm1(tmp31) tmp33 = tmp32 * tmp30 tmp34 = tl.where(tmp29, tmp31, tmp33) tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp34, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = 0.0 tmp31 = tmp29 > tmp30 tmp32 = 1.0 tmp33 = tmp29 * tmp32 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp32 tmp36 = tl.where(tmp31, tmp33, tmp35) tl.store(in_out_ptr0 + 
(r1 + 64 * x0), tmp36, xmask) tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf4 del buf4 buf5 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) get_raw_stream(0) triton_per_fused_elu_native_group_norm_0[grid(4)](buf6, buf0, primals_3, primals_4, buf1, buf5, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf8 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf11 del buf11 buf12 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_elu_native_group_norm_0[grid(4)](buf13, buf7, primals_6, primals_7, buf8, buf12, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf20 = buf19 del buf19 buf18 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) triton_per_fused_add_elu_native_group_norm_1[grid(4)](buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_10 return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf5, (4, 1), (1, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 1), (1, 1), 0), reinterpret_tensor( buf12, (4, 1), (1, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 1), (1, 1), 0), reinterpret_tensor(buf18, (4, 1), (1, 1), 0), buf20 ) def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): return nn.Conv3d(in_channels, out_channels, kernel_size, padding= padding, bias=bias) def 
create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): """ Create a list of modules with together constitute a single conv layer with non-linearity and optional batchnorm/groupnorm. Args: in_channels (int): number of input channels out_channels (int): number of output channels order (string): order of things, e.g. 'cr' -> conv + ReLU 'gcr' -> groupnorm + conv + ReLU 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU 'bcr' -> batchnorm + conv + ReLU num_groups (int): number of groups for the GroupNorm padding (int): add zero-padding to the input Return: list of tuple (name, module) """ assert 'c' in order, 'Conv layer MUST be present' assert order[0 ] not in 'rle', 'Non-linearity cannot be the first operation in the layer' modules = [] for i, char in enumerate(order): if char == 'r': modules.append(('ReLU', nn.ReLU(inplace=True))) elif char == 'l': modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) elif char == 'e': modules.append(('ELU', nn.ELU(inplace=True))) elif char == 'c': bias = not ('g' in order or 'b' in order) modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) elif char == 'g': is_before_conv = i < order.index('c') if is_before_conv: num_channels = in_channels else: num_channels = out_channels if num_channels < num_groups: num_groups = 1 assert num_channels % num_groups == 0, f'Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}' modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=num_channels))) elif char == 'b': is_before_conv = i < order.index('c') if is_before_conv: modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) else: modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) else: raise ValueError( f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" ) return modules class SingleConv(nn.Sequential): """ Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order of operations can be specified via the `order` parameter Args: in_channels (int): number of input channels out_channels (int): number of output channels kernel_size (int): size of the convolving kernel order (string): determines the order of layers, e.g. 'cr' -> conv + ReLU 'crg' -> conv + ReLU + groupnorm 'cl' -> conv + LeakyReLU 'ce' -> conv + ELU num_groups (int): number of groups for the GroupNorm """ def __init__(self, in_channels, out_channels, kernel_size=3, order= 'crg', num_groups=8, padding=1): super(SingleConv, self).__init__() for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): self.add_module(name, module) class ExtResNetBlockNew(nn.Module): """ Basic UNet block consisting of a SingleConv followed by the residual block. The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number of output channels is compatible with the residual block that follows. This block can be used instead of standard DoubleConv in the Encoder module. Motivated by: https://arxiv.org/pdf/1706.00120.pdf Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
""" def __init__(self, in_channels, out_channels, kernel_size=3, order= 'cge', num_groups=8, **kwargs): super(ExtResNetBlockNew, self).__init__() self.conv1 = SingleConv(in_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) self.conv2 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=order, num_groups=num_groups) n_order = order for c in 'rel': n_order = n_order.replace(c, '') self.conv3 = SingleConv(out_channels, out_channels, kernel_size= kernel_size, order=n_order, num_groups=num_groups) if 'l' in order: self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) elif 'e' in order: self.non_linearity = nn.ELU(inplace=True) else: self.non_linearity = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_3 = self.conv1.groupnorm.weight primals_4 = self.conv1.groupnorm.bias primals_5 = self.conv2.conv.weight primals_6 = self.conv2.groupnorm.weight primals_7 = self.conv2.groupnorm.bias primals_8 = self.conv3.conv.weight primals_9 = self.conv3.groupnorm.weight primals_10 = self.conv3.groupnorm.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
hummat/convolutional_occupancy_networks
ExtResNetBlock
false
3643
[ "MIT" ]
0
bb351edff59c196e01aa687943e19fee4ac11077
https://github.com/hummat/convolutional_occupancy_networks/tree/bb351edff59c196e01aa687943e19fee4ac11077
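Illustrative usage sketch, not one of the dataset fields (it assumes the SingleConv and ExtResNetBlock definitions above are in scope): the block preserves the spatial shape and maps in_channels to out_channels, with the residual added around conv2 and conv3. The channel counts below are made up.
import torch

block = ExtResNetBlock(in_channels=2, out_channels=8)   # default order='cge', num_groups=8
x = torch.rand(1, 2, 4, 4, 4)                           # (N, C, D, H, W)
print(block(x).shape)                                   # torch.Size([1, 8, 4, 4, 4])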
PcamPool
import torch from torch import nn class PcamPool(nn.Module): def __init__(self): super(PcamPool, self).__init__() def forward(self, feat_map, logit_map): assert logit_map is not None prob_map = torch.sigmoid(logit_map) weight_map = prob_map / prob_map.sum(dim=2, keepdim=True).sum(dim=3, keepdim=True) feat = (feat_map * weight_map).sum(dim=2, keepdim=True).sum(dim=3, keepdim=True) return feat def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp35 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp37 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl.sigmoid(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl.sigmoid(tmp11) tmp14 = tl.sigmoid(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl.sigmoid(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = tl.sigmoid(tmp23) tmp26 = tl.sigmoid(tmp25) tmp27 = tmp24 + tmp26 tmp29 = tl.sigmoid(tmp28) tmp30 = tmp27 + tmp29 tmp32 = tl.sigmoid(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp22 + tmp33 tmp36 = tl.sigmoid(tmp35) tmp38 = tl.sigmoid(tmp37) tmp39 = tmp36 + tmp38 tmp41 = tl.sigmoid(tmp40) tmp42 = tmp39 + tmp41 tmp44 = tl.sigmoid(tmp43) tmp45 = tmp42 + tmp44 tmp46 = tmp34 + tmp45 tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_div_mul_sigmoid_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp18 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp19 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp4 = tmp2 / tmp3 tmp5 = tmp0 * tmp4 tmp8 = tl.sigmoid(tmp7) tmp9 = tmp8 / tmp3 tmp10 = tmp6 * tmp9 tmp11 = tmp5 + 
tmp10 tmp14 = tl.sigmoid(tmp13) tmp15 = tmp14 / tmp3 tmp16 = tmp12 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tl.sigmoid(tmp19) tmp21 = tmp20 / tmp3 tmp22 = tmp18 * tmp21 tmp23 = tmp17 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_sum_0[grid(16)](arg0_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 64, 1), torch.float32) triton_poi_fused_div_mul_sigmoid_sum_1[grid(64)](arg1_1, arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 triton_poi_fused_sum_2[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 return buf2, class PcamPoolNew(nn.Module): def __init__(self): super(PcamPoolNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
iampartho/EEE426
PcamPool
false
3644
[ "Apache-2.0" ]
0
a706660c0efcd4adea44d54c57a34bcaa4439ec1
https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1
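Illustrative equivalence check, not one of the dataset fields (it assumes the PcamPool class above is in scope): the pooling weights are sigmoid(logit_map) normalised over H*W, so the result is a weighted spatial average of the feature map.
import torch

torch.manual_seed(0)
feat, logit = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = PcamPool()(feat, logit)                             # (4, 4, 1, 1)
w = torch.sigmoid(logit)
w = w / w.sum(dim=(2, 3), keepdim=True)                   # weights sum to 1 per (n, c)
print(torch.allclose(out, (feat * w).sum(dim=(2, 3), keepdim=True)))  # True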
CAModule
import torch from torch import nn class CAModule(nn.Module): """ Re-implementation of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* code reference: https://github.com/kobiso/CBAM-keras/blob/master/models/attention_module.py """ def __init__(self, num_channels, reduc_ratio=2): super(CAModule, self).__init__() self.num_channels = num_channels self.reduc_ratio = reduc_ratio self.fc1 = nn.Linear(num_channels, num_channels // reduc_ratio, bias=True) self.fc2 = nn.Linear(num_channels // reduc_ratio, num_channels, bias=True) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, feat_map): gap_out = feat_map.view(feat_map.size()[0], self.num_channels, -1 ).mean(dim=2) fc1_out = self.relu(self.fc1(gap_out)) fc2_out = self.sigmoid(self.fc2(fc1_out)) fc2_out = fc2_out.view(fc2_out.size()[0], fc2_out.size()[1], 1, 1) feat_map = torch.mul(feat_map, fc2_out) return feat_map def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4 ), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf5, primals_1, buf1, buf3, buf4, primals_4 class CAModuleNew(nn.Module): """ Re-implementation of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* code reference: https://github.com/kobiso/CBAM-keras/blob/master/models/attention_module.py """ def 
__init__(self, num_channels, reduc_ratio=2): super(CAModuleNew, self).__init__() self.num_channels = num_channels self.reduc_ratio = reduc_ratio self.fc1 = nn.Linear(num_channels, num_channels // reduc_ratio, bias=True) self.fc2 = nn.Linear(num_channels // reduc_ratio, num_channels, bias=True) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
iampartho/EEE426
CAModule
false
3645
[ "Apache-2.0" ]
0
a706660c0efcd4adea44d54c57a34bcaa4439ec1
https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1
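Illustrative usage sketch, not one of the dataset fields (it assumes the CAModule class above is in scope): the SE block rescales each channel by a sigmoid gate in (0, 1), so for a non-negative input every output entry is no larger than its input.
import torch

torch.manual_seed(0)
se = CAModule(num_channels=4)
x = torch.rand(4, 4, 4, 4)
y = se(x)
print(y.shape)                                   # torch.Size([4, 4, 4, 4])
print(bool((y <= x + 1e-6).all()))               # True: the gate is at most 1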
LogSumExpPool
import torch from torch import nn class LogSumExpPool(nn.Module): def __init__(self, gamma): super(LogSumExpPool, self).__init__() self.gamma = gamma def forward(self, feat_map): """ Numerically stable implementation of the operation Arguments: feat_map(Tensor): tensor with shape (N, C, H, W) return(Tensor): tensor with shape (N, C, 1, 1) """ _N, _C, H, W = feat_map.shape m, _ = torch.max(feat_map, dim=-1, keepdim=True)[0].max(dim=-2, keepdim=True) value0 = feat_map - m area = 1.0 / (H * W) g = self.gamma return m + 1 / g * torch.log(area * torch.sum(torch.exp(g * value0), dim=(-1, -2), keepdim=True)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'gamma': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_exp_log_max_mul_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r1 = rindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tmp32 = tmp31 - tmp30 tmp33 = 4.0 tmp34 = tmp32 * tmp33 tmp35 = tl_math.exp(tmp34) tmp36 = tl.broadcast_to(tmp35, [XBLOCK, RBLOCK]) tmp38 = tl.where(xmask, tmp36, 0) tmp39 = tl.sum(tmp38, 1)[:, None] tmp40 = 0.0625 tmp41 = tmp39 * tmp40 tmp42 = tl_math.log(tmp41) tmp43 = 0.25 tmp44 = tmp42 * tmp43 tmp45 = tmp30 + tmp44 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp45, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf1 
get_raw_stream(0) triton_per_fused_add_exp_log_max_mul_sub_sum_0[grid(16)](buf2, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class LogSumExpPoolNew(nn.Module): def __init__(self, gamma): super(LogSumExpPoolNew, self).__init__() self.gamma = gamma def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
iampartho/EEE426
LogSumExpPool
false
3,647
[ "Apache-2.0" ]
0
a706660c0efcd4adea44d54c57a34bcaa4439ec1
https://github.com/iampartho/EEE426/tree/a706660c0efcd4adea44d54c57a34bcaa4439ec1
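A minimal sanity check for the LogSumExpPool entry above (a sketch; it assumes the LogSumExpPool class from that entry is in scope, everything else is mine): the max-shifted forward pass is algebraically equal to (1/gamma) * (logsumexp(gamma * x) - log(H * W)) taken over the spatial dimensions.

import math
import torch

def lse_pool_reference(feat_map, gamma):
    # plain log-sum-exp pooling over H, W without the explicit max shift
    _N, _C, H, W = feat_map.shape
    lse = torch.logsumexp(gamma * feat_map, dim=(-1, -2), keepdim=True)
    return (lse - math.log(H * W)) / gamma

x = torch.rand(4, 4, 4, 4)
pool = LogSumExpPool(gamma=4)  # class defined in the entry above
assert torch.allclose(pool(x), lse_pool_reference(x, 4.0), atol=1e-6)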
SoftCrossEntropyLoss
import torch import torch.utils.data class SoftCrossEntropyLoss(torch.nn.Module): """SoftCrossEntropyLoss (useful for label smoothing and mixup). Identical to torch.nn.CrossEntropyLoss if used with one-hot labels.""" def __init__(self): super(SoftCrossEntropyLoss, self).__init__() def forward(self, x, y): loss = -y * torch.nn.functional.log_softmax(x, -1) return torch.sum(loss) / x.shape[0] def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp2 = tl.load(in_ptr1 + r2, None) tmp3 = tl.load(in_ptr1 + 4 * r1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp1 = -tmp0 tmp4 = tl_math.exp(tmp3) tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp14 = tl_math.log(tmp13) tmp15 = tmp2 - tmp14 tmp16 = tmp1 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, arg0_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class SoftCrossEntropyLossNew(torch.nn.Module): """SoftCrossEntropyLoss (useful for label smoothing and mixup). Identical to torch.nn.CrossEntropyLoss if used with one-hot labels.""" def __init__(self): super(SoftCrossEntropyLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
i-murray/pycls
SoftCrossEntropyLoss
false
3,648
[ "MIT" ]
0
858dac527eb11732ba08b94162d18b53454b9018
https://github.com/i-murray/pycls/tree/858dac527eb11732ba08b94162d18b53454b9018
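The docstring's claim that SoftCrossEntropyLoss matches torch.nn.CrossEntropyLoss for one-hot labels can be checked with a small sketch (the 2-D logits and the default 'mean' reduction are assumptions of mine, not part of the entry):

import torch
import torch.nn.functional as F

logits = torch.randn(8, 5)
targets = torch.randint(0, 5, (8,))
one_hot = F.one_hot(targets, num_classes=5).float()

soft = torch.sum(-one_hot * F.log_softmax(logits, -1)) / logits.shape[0]
hard = F.cross_entropy(logits, targets)  # mean negative log-likelihood at the target index
assert torch.allclose(soft, hard)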
CNN
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): def __init__(self, input_size=50, hidden_size=256, dropout=0, kernel_size=3, padding=1, activation_function=F.relu): """ Args: input_size: dimension of input embedding kernel_size: kernel_size for CNN padding: padding for CNN hidden_size: hidden size """ super().__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size, padding =padding) self.act = activation_function self.dropout = nn.Dropout(dropout) def forward(self, x): """ Args: input features: (B, L, I_EMBED) Return: output features: (B, L, H_EMBED) """ x = x.transpose(1, 2) x = self.conv(x) x = self.act(x) x = self.dropout(x) x = x.transpose(1, 2) return x def get_inputs(): return [torch.rand([4, 50, 50])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 200 xnumel = 50 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 50 y1 = yindex // 50 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 50 * x2 + 2500 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 50 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 50 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 50, 50), (2500, 50, 1)) assert_size_stride(primals_2, (256, 50, 3), (150, 3, 1)) assert_size_stride(primals_3, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 50, 50), (2500, 50, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(200, 50)](primals_1, buf0, 200, 50, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 50), (12800, 50, 1)) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 256, 50), (12800, 50, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(51200)]( buf2, primals_3, buf3, 51200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_3 return reinterpret_tensor(buf2, (4, 50, 256), (12800, 1, 50), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 50, 50), (2500, 1, 50), 0), buf3 class CNNNew(nn.Module): def __init__(self, input_size=50, hidden_size=256, dropout=0, kernel_size=3, padding=1, activation_function=F.relu): """ Args: input_size: dimention of input embedding kernel_size: kernel_size for CNN padding: padding for CNN hidden_size: hidden size """ super().__init__() self.conv = nn.Conv1d(input_size, hidden_size, kernel_size, padding =padding) self.act = activation_function self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
igorvlnascimento/DeepREF
CNN
false
3,649
[ "MIT" ]
0
0fed8120571e44e12ee3d1861289bc101c0a275f
https://github.com/igorvlnascimento/DeepREF/tree/0fed8120571e44e12ee3d1861289bc101c0a275f
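A quick shape check for the CNN entry above (a sketch; the CNN class and the input shape are taken from that entry, the rest is assumed): with the default hidden_size of 256 and kernel_size=3, padding=1, the length dimension is preserved.

import torch

x = torch.rand(4, 50, 50)  # (B, L, I_EMBED), as in get_inputs()
out = CNN()(x)             # Conv1d(50, 256, 3, padding=1) applied along L
print(out.shape)           # torch.Size([4, 50, 256]), i.e. (B, L, H_EMBED)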
ConvNet
import torch import torch.nn as nn class ConvNet(nn.Module): def __init__(self): super(ConvNet, self).__init__() self.conv1 = nn.Conv2d(1, 5, 6, 2) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(5, 8, 3, 1) self.drp1 = nn.Dropout2d(0.25) self.pool2 = nn.MaxPool2d(2, 2) self.lin1 = nn.Linear(288, 10) def forward(self, x): x = self.conv1(x) x = torch.relu(x) x = self.pool1(x) x = self.conv2(x) x = self.drp1(x) x = torch.relu(x) x = self.pool2(x) x = x.view(-1, 288) x = self.lin1(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 5 x2 = xindex // 4500 x4 = xindex % 4500 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 4512 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4500 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 15 x1 = xindex // 15 % 75 x2 = xindex // 1125 x3 = xindex % 1125 tmp0 = tl.load(in_ptr0 + (2 * x0 + 60 * x1 + 4512 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 60 * x1 + 4512 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (30 + 2 * x0 + 60 * x1 + 4512 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + 2 * x0 + 60 * x1 + 4512 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 1152 * x2), tmp6, xmask) tl.store(out_ptr1 + (x3 + 1152 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5408 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 169 % 8 x2 = xindex // 1352 x4 = xindex % 1352 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 1376 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 % 8 x3 = xindex // 288 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 26 * x1 + 169 * x2 + 1376 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 26 * x1 + 169 * x2 + 1376 * x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (13 + 2 * x0 + 26 * x1 + 169 * x2 + 1376 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (14 + 2 * x0 + 26 * x1 + 169 * x2 + 
1376 * x3 ), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x4, tmp15, xmask) tl.store(out_ptr1 + x4, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (5, 1, 6, 6), (36, 36, 6, 1)) assert_size_stride(primals_2, (5,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (8, 5, 3, 3), (45, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (10, 288), (288, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 5, 30, 30), (4500, 900, 30, 1)) buf1 = empty_strided_cuda((4, 5, 30, 30), (4512, 900, 30, 1), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18000)](buf0, primals_2, buf1, 18000, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 5, 15, 15), (1152, 225, 15, 1), torch .float32) buf3 = empty_strided_cuda((4, 5, 15, 15), (1152, 225, 15, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4500)](buf1, buf2, buf3, 4500, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 8, 13, 13), (1352, 169, 13, 1)) buf5 = empty_strided_cuda((4, 8, 13, 13), (1376, 169, 13, 1), torch .float32) triton_poi_fused_convolution_relu_2[grid(5408)](buf4, primals_5, buf5, 5408, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.int8) buf7 = empty_strided_cuda((4, 8, 6, 6), (288, 36, 6, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_3[grid(1152)](buf5, buf6, buf7, 1152, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 288), (288, 1), 0), reinterpret_tensor(primals_6, (288, 10), (1, 288), 0), alpha=1, beta=1, out=buf8) del primals_7 return (buf8, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 288), (288, 1), 0), primals_6) class ConvNetNew(nn.Module): def __init__(self): super(ConvNetNew, self).__init__() self.conv1 = nn.Conv2d(1, 5, 6, 2) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(5, 8, 3, 1) self.drp1 = nn.Dropout2d(0.25) self.pool2 = nn.MaxPool2d(2, 2) self.lin1 = nn.Linear(288, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.lin1.weight primals_7 = self.lin1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
iOsnaaente/Faculdade_ECA-UFSM
ConvNet
false
3,650
[ "MIT" ]
0
aea8b8d66169b073c439b47ad990e45695cbe953
https://github.com/iOsnaaente/Faculdade_ECA-UFSM/tree/aea8b8d66169b073c439b47ad990e45695cbe953
RAddFloat
import torch import torch._utils class RAddFloat(torch.nn.Module): def __init__(self): super(RAddFloat, self).__init__() def forward(self, x): y = 1.0 + x y = y + y + 1 y = y + y + 1 x = y + x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = tmp2 + tmp2 tmp4 = tmp3 + tmp1 tmp5 = tmp4 + tmp4 tmp6 = tmp5 + tmp1 tmp7 = tmp6 + tmp0 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RAddFloatNew(torch.nn.Module): def __init__(self): super(RAddFloatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ijinjay/torch2mindspore
RAddFloat
false
3,652
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
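The chain of additions in RAddFloat collapses to a single affine map, which is why the generated code above is one fused elementwise kernel; the folding below is my derivation, not stated in the source.

import torch

# y = 1 + x       ->  x + 1
# y = y + y + 1   ->  2x + 3
# y = y + y + 1   ->  4x + 7
# x = y + x       ->  5x + 7
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(RAddFloat()(x), 5 * x + 7)  # RAddFloat from the entry above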
Model
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_size, output_size): super(Model, self).__init__() hidden2_size = int(input_size / 2) hidden1_size = int((input_size + hidden2_size) * 3 / 2) hidden3_size = int((output_size + hidden2_size) * 3 / 2) self.hidden1 = nn.Linear(input_size, hidden1_size) self.hidden2 = nn.Linear(hidden1_size, hidden2_size) self.hidden3 = nn.Linear(hidden2_size, hidden3_size) nn.init.xavier_uniform_(self.hidden1.weight) nn.init.xavier_uniform_(self.hidden2.weight) nn.init.xavier_uniform_(self.hidden3.weight) self.predict = nn.Linear(hidden3_size, output_size) nn.init.xavier_uniform_(self.predict.weight) def forward(self, x): x = F.relu(self.hidden1(x)) x = F.relu(self.hidden2(x)) x = F.relu(self.hidden3(x)) x = self.predict(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 9 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (9, 4), (4, 1)) assert_size_stride(primals_2, (9,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 9), (9, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (9, 2), (2, 1)) assert_size_stride(primals_7, (9,), (1,)) assert_size_stride(primals_8, (4, 9), (9, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 9), (9, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 9), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 9), (144, 36, 9, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 9), (144, 36, 9, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(576)](buf1, primals_2, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 9), (9, 1), 0), reinterpret_tensor(primals_4, (9, 2), (1, 9), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(128)](buf3, primals_5, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 9), (9, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 9), (1, 2), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 9), (144, 36, 9, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 9), (144, 36, 9, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_0[grid(576)](buf5, primals_7, buf7, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 9), ( 9, 1), 0), reinterpret_tensor(primals_8, (9, 4), (1, 9), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 9), (9, 1), 0), reinterpret_tensor( buf3, (64, 2), (2, 1), 0), reinterpret_tensor(buf5, (64, 9), (9, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class ModelNew(nn.Module): def __init__(self, input_size, output_size): super(ModelNew, self).__init__() hidden2_size = int(input_size / 2) hidden1_size = int((input_size + hidden2_size) * 3 / 2) hidden3_size = int((output_size + hidden2_size) * 3 / 2) self.hidden1 = nn.Linear(input_size, hidden1_size) self.hidden2 = nn.Linear(hidden1_size, hidden2_size) self.hidden3 = nn.Linear(hidden2_size, hidden3_size) nn.init.xavier_uniform_(self.hidden1.weight) nn.init.xavier_uniform_(self.hidden2.weight) nn.init.xavier_uniform_(self.hidden3.weight) self.predict = nn.Linear(hidden3_size, output_size) nn.init.xavier_uniform_(self.predict.weight) def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.hidden3.weight primals_7 = self.hidden3.bias primals_8 = self.predict.weight primals_9 = self.predict.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
iasakura/tiramisu
Model
false
3,653
[ "MIT" ]
0
71aae95424dcca6ab920ab13e6e882006f13629d
https://github.com/iasakura/tiramisu/tree/71aae95424dcca6ab920ab13e6e882006f13629d
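The hidden-size arithmetic in the Model entry above explains the weight shapes asserted in call(); worked through for input_size = output_size = 4 (my example, matching get_init_inputs):

input_size = output_size = 4
hidden2_size = int(input_size / 2)                        # 2
hidden1_size = int((input_size + hidden2_size) * 3 / 2)   # 9
hidden3_size = int((output_size + hidden2_size) * 3 / 2)  # 9
# hence the asserted weights: hidden1 (9, 4), hidden2 (2, 9), hidden3 (9, 2), predict (4, 9)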
Padding2
import torch import torch._utils class Padding2(torch.nn.Module): def __init__(self, input_channel): super(Padding2, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, x): x1 = self.conv(x) y = torch.nn.functional.pad(x1, (1, 0, 0, 1)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 7, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = -1 + x0 tmp4 = tl.full([1], 0, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (-1 + x0 + 7 * x1 + 49 * x2), tmp6 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, primals_1, primals_2 class Padding2New(torch.nn.Module): def __init__(self, input_channel): super(Padding2New, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ijinjay/torch2mindspore
Padding2
false
3,654
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
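Shape bookkeeping for the Padding2 entry above, which also applies to the Padding1, Padding3 and Padding4 entries below (my derivation, not from the source): a ConvTranspose2d with kernel 1 and stride 2 maps a 4x4 input to 7x7, since H_out = (H_in - 1) * stride + kernel = (4 - 1) * 2 + 1 = 7, and each variant then pads one extra row and one extra column on different sides to reach the 8x8 buffer allocated in call().

import torch

m = torch.nn.ConvTranspose2d(4, 4, 1, stride=2, padding=0, groups=4, bias=False)
y = m(torch.rand(4, 4, 4, 4))
print(y.shape)                                         # torch.Size([4, 4, 7, 7])
print(torch.nn.functional.pad(y, (1, 0, 0, 1)).shape)  # torch.Size([4, 4, 8, 8])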
MolDQN
import torch import torch.nn as nn class MolDQN(nn.Module): def __init__(self, input_length, output_length): super(MolDQN, self).__init__() self.linear_1 = nn.Linear(input_length, 1024) self.linear_2 = nn.Linear(1024, 512) self.linear_3 = nn.Linear(512, 128) self.linear_4 = nn.Linear(128, 32) self.linear_5 = nn.Linear(32, output_length) self.activation = nn.ReLU() def forward(self, x): x = self.activation(self.linear_1(x)) x = self.activation(self.linear_2(x)) x = self.activation(self.linear_3(x)) x = self.activation(self.linear_4(x)) x = self.linear_5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_length': 4, 'output_length': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (1024, 4), (4, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 1024), (1024, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (128, 512), (512, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (32, 128), (128, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (4, 32), (32, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((64, 1024), (1024, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1024), (16384, 4096, 1024, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 1024), (16384, 4096, 1024, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(65536)](buf1, primals_2, buf12, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf2 buf11 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf3, primals_5, buf11, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 128), (1, 512), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf4 buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(8192)](buf5, primals_7, buf10, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 32), (1, 128), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf6 buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(2048)](buf7, primals_9, buf9, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 32), (32, 1), 0), reinterpret_tensor(primals_10, (32, 4), (1, 32), 0 ), alpha=1, beta=1, out=buf8) del primals_11 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 1024), (1024, 1), 0 ), reinterpret_tensor(buf3, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf5, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf7, (64, 32), (32, 1), 0 ), primals_10, buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12 class MolDQNNew(nn.Module): def __init__(self, input_length, output_length): super(MolDQNNew, self).__init__() self.linear_1 = nn.Linear(input_length, 1024) self.linear_2 = nn.Linear(1024, 512) self.linear_3 = nn.Linear(512, 128) self.linear_4 = nn.Linear(128, 32) self.linear_5 = nn.Linear(32, output_length) self.activation = nn.ReLU() def forward(self, input_0): primals_1 = self.linear_1.weight primals_2 = self.linear_1.bias primals_4 = self.linear_2.weight primals_5 = self.linear_2.bias primals_6 = self.linear_3.weight primals_7 = self.linear_3.bias primals_8 = self.linear_4.weight primals_9 = self.linear_4.bias primals_10 = self.linear_5.weight primals_11 = self.linear_5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
iamchosenlee/MolDQN-pytorch
MolDQN
false
3,655
[ "MIT" ]
0
66bd1e067e439e49abc77d21089d3baf065317d4
https://github.com/iamchosenlee/MolDQN-pytorch/tree/66bd1e067e439e49abc77d21089d3baf065317d4
Padding1
import torch import torch._utils class Padding1(torch.nn.Module): def __init__(self, input_channel): super(Padding1, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, x): x1 = self.conv(x) y = torch.nn.functional.pad(x1, (0, 1, 0, 1)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 7, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 7 * x1 + 49 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, primals_1, primals_2 class Padding1New(torch.nn.Module): def __init__(self, input_channel): super(Padding1New, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ijinjay/torch2mindspore
Padding1
false
3,656
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
Padding3
import torch import torch._utils class Padding3(torch.nn.Module): def __init__(self, input_channel): super(Padding3, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, x): x1 = self.conv(x) y = torch.nn.functional.pad(x1, (0, 1, 1, 0)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x3 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = x0 tmp4 = tl.full([1], 7, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (-7 + x0 + 7 * x1 + 49 * x2), tmp6 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, primals_1, primals_2 class Padding3New(torch.nn.Module): def __init__(self, input_channel): super(Padding3New, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ijinjay/torch2mindspore
Padding3
false
3,657
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
SP
import torch import torch.nn as nn import torch._utils def sp_init(x): x01 = x[:, :, 0::2, :] x02 = x[:, :, 1::2, :] x_LL = x01[:, :, :, 0::2] x_HL = x02[:, :, :, 0::2] x_LH = x01[:, :, :, 1::2] x_HH = x02[:, :, :, 1::2] return torch.cat((x_LL, x_HL, x_LH, x_HH), 1) class SP(nn.Module): def __init__(self): super(SP, self).__init__() self.requires_grad = False def forward(self, x): return sp_init(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 16 x0 = xindex % 2 x1 = xindex // 2 % 2 x3 = xindex // 64 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1 + 16 * (-8 + x2) + 64 * x3), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1 + 16 * (-12 + x2) + 64 * x3), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x4, tmp22, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 2, 2), (64, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, def sp_init(x): x01 = x[:, :, 0::2, :] x02 = x[:, :, 1::2, :] x_LL = x01[:, :, :, 0::2] x_HL = x02[:, :, :, 0::2] x_LH = x01[:, :, :, 1::2] x_HH = x02[:, :, :, 1::2] return torch.cat((x_LL, x_HL, x_LH, x_HH), 1) class SPNew(nn.Module): def __init__(self): super(SPNew, self).__init__() self.requires_grad = False def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ijinjay/torch2mindspore
SP
false
3,658
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
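A shape note for the SP entry above (my summary): sp_init stacks the four polyphase components (even/odd rows crossed with even/odd columns) along the channel dimension, so (N, C, H, W) becomes (N, 4*C, H//2, W//2); for the (4, 4, 4, 4) input in get_inputs() that is the (4, 16, 2, 2) buffer allocated in call().

import torch

x = torch.rand(4, 4, 4, 4)
print(sp_init(x).shape)  # torch.Size([4, 16, 2, 2]); sp_init as defined in the entry above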
Padding4
import torch import torch._utils class Padding4(torch.nn.Module): def __init__(self, input_channel): super(Padding4, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, x): x1 = self.conv(x) y = torch.nn.functional.pad(x1, (1, 0, 1, 0)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -1 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-8 + x0 + 7 * x1 + 49 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1)) buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, primals_1, primals_2 class Padding4New(torch.nn.Module): def __init__(self, input_channel): super(Padding4New, self).__init__() self.requires_grad = False self.conv = torch.nn.ConvTranspose2d(input_channel, input_channel, 1, stride=2, padding=0, groups=input_channel, bias=False) torch.nn.init.constant_(self.conv.weight, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ijinjay/torch2mindspore
Padding4
false
3,659
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
Custom
import torch import torch._utils class Custom(torch.nn.Module): def __init__(self): super(Custom, self).__init__() self.conv = torch.nn.Conv2d(3, 3, 1, 1) self.conv1 = torch.nn.Conv2d(3, 3, 1, 1) self.conv2 = torch.nn.Conv2d(3, 3, 1, 1) self.relu = torch.nn.ReLU() def forward(self, x): _ = self.conv(x) _ = self.conv1(_) _ = x + _ _ = self.relu(_) t = self.conv2(_) t = self.relu(t) r = torch.cat([_, t]) return r * 100 def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_add_convolution_relu_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(in_out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 12288 x3 = xindex % 12288 x1 = xindex // 4096 % 3 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3 + 12288 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x3 + 12288 * (-4 + x2)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + x1, tmp6, eviction_policy='evict_last', other=0.0 ) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tmp17 = 100.0 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + x4, tmp18, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_5, (3,), (1,)) assert_size_stride(primals_6, (3, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(49152)](buf1, primals_2, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_add_convolution_relu_1[grid(49152)](buf3, primals_3, primals_5, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf5 = empty_strided_cuda((8, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) triton_poi_fused_cat_mul_2[grid(98304)](buf3, buf4, primals_7, buf5, 98304, XBLOCK=1024, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(49152)]( buf4, primals_7, buf6, 49152, XBLOCK=256, num_warps=4, num_stages=1 ) del buf4 del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6 class CustomNew(torch.nn.Module): def __init__(self): super(CustomNew, self).__init__() self.conv = torch.nn.Conv2d(3, 3, 1, 1) self.conv1 = torch.nn.Conv2d(3, 3, 1, 1) self.conv2 = torch.nn.Conv2d(3, 3, 1, 1) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ijinjay/torch2mindspore
Custom
false
3,660
[ "MIT" ]
0
e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
https://github.com/ijinjay/torch2mindspore/tree/e4c06bd5e8a3b25b72bf158393a66c5cd1b572d2
SALayer
import torch import torch.nn as nn import torch.utils.model_zoo class SALayer(nn.Module): def __init__(self, channel, kernel_size=3): super(SALayer, self).__init__() self.conv_sa = nn.Conv2d(channel, channel, kernel_size, padding=1, groups=channel) def forward(self, x): y = self.conv_sa(x) return x * y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_mul_0[grid(256)](buf1, primals_3, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class SALayerNew(nn.Module): def __init__(self, channel, kernel_size=3): super(SALayerNew, self).__init__() self.conv_sa = nn.Conv2d(channel, channel, kernel_size, padding=1, groups=channel) def forward(self, input_0): primals_1 = self.conv_sa.weight primals_2 = self.conv_sa.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
iariav/EDSR-PyTorch
SALayer
false
3,661
[ "MIT" ]
0
c709b3d43adb6c2457cf87c37c1f34a7bcfc48bb
https://github.com/iariav/EDSR-PyTorch/tree/c709b3d43adb6c2457cf87c37c1f34a7bcfc48bb
Generator
import torch import torch.nn as nn class Generator(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, size, vocab): super(Generator, self).__init__() self.size = size self.proj = nn.Linear(self.size, vocab) def forward(self, x): sliced_x = x[:, 0, :] out = self.proj(sliced_x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size': 4, 'vocab': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0) class GeneratorNew(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, size, vocab): super(GeneratorNew, self).__init__() self.size = size self.proj = nn.Linear(self.size, vocab) def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
icdmtlog/icdm2021tlog
Generator
false
3,662
[ "Apache-2.0" ]
0
6f92cce926b923d8f03689ddbeef3ac09d23712e
https://github.com/icdmtlog/icdm2021tlog/tree/6f92cce926b923d8f03689ddbeef3ac09d23712e
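A minimal parity sketch for the Generator entry above (not part of the dataset row): it assumes both the eager Generator and the compiled GeneratorNew classes are defined in the same session, and that a CUDA device is available, since the generated call() allocates CUDA buffers. The variable names ref/opt are illustrative only.

import torch

torch.manual_seed(0)
ref = Generator(size=4, vocab=4).cuda()        # eager module from the entry
opt = GeneratorNew(size=4, vocab=4).cuda()     # Triton-compiled variant
opt.load_state_dict(ref.state_dict())          # share the randomly initialised weights

x = torch.rand(4, 4, 4, 4, device='cuda')      # same shape as get_inputs()
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True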
GLU
import torch from torch import Tensor from torch import nn as nn import torch.nn.functional as F class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. """ mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class FeedForward(nn.Module): """ ## FFN module source [FeedForward network](https://arxiv.org/abs/2002.05202) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer, compatible with Monte Carlo dropout at inference time * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = MonteCarloDropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class GLU(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = FeedForward(d_model, d_ff, dropout, nn.Sigmoid(), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor from torch import nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. 
""" mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class FeedForward(nn.Module): """ ## FFN module source [FeedForward network](https://arxiv.org/abs/2002.05202) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer, compatible with Monte Carlo dropout at inference time * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = MonteCarloDropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class GLUNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = FeedForward(d_model, d_ff, dropout, nn.Sigmoid(), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
gdevos010/darts
GLU
false
3,663
[ "Apache-2.0" ]
0
96c97c1e241500ae7b91d32bbfa21d811e4a7d71
https://github.com/gdevos010/darts/tree/96c97c1e241500ae7b91d32bbfa21d811e4a7d71
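A small sketch of what the fused kernel in the GLU entry computes (an illustration under assumptions, not part of the row): all three projections are bias-free here, so the gated FFN reduces to layer2(sigmoid(layer1(x)) * linear_v(x)), and MonteCarloDropout is a no-op because mc_dropout_enabled defaults to False. Assumes CUDA is available.

import torch

glu = GLUNew(d_model=4, d_ff=4).cuda()         # compiled module from above
x = torch.rand(4, 4, 4, 4, device='cuda')

ffn = glu.ffn
with torch.no_grad():
    manual = ffn.layer2(torch.sigmoid(ffn.layer1(x)) * ffn.linear_v(x))
    print(torch.allclose(manual, glu(x), atol=1e-5))   # expected: True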
ConvHeadPooling
import torch import torch.nn as nn from typing import Tuple class ConvHeadPooling(nn.Module): def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): super(ConvHeadPooling, self).__init__() self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, padding_mode= padding_mode, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token) ->Tuple[torch.Tensor, torch.Tensor]: x = self.conv(x) cls_token = self.fc(cls_token) return x, cls_token def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0) class ConvHeadPoolingNew(nn.Module): def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): super(ConvHeadPoolingNew, self).__init__() self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, padding_mode= padding_mode, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, input_0, input_1): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
iliasprc/Compact-Transformers
ConvHeadPooling
false
3,664
[ "Apache-2.0" ]
0
31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
https://github.com/iliasprc/Compact-Transformers/tree/31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
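A brief usage sketch for the two-input / two-output interface of the ConvHeadPooling entry (hypothetical, assuming CUDA): with stride=1 the depthwise conv uses a 2x2 kernel with no padding, so the feature map shrinks from 4x4 to 3x3, while the class token only passes through the linear layer.

import torch

pool = ConvHeadPoolingNew(in_feature=4, out_feature=4, stride=1).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')           # feature map
cls_token = torch.rand(4, 4, 4, 4, device='cuda')   # token input, per get_inputs()
with torch.no_grad():
    feat, tok = pool(x, cls_token)
print(feat.shape, tok.shape)   # torch.Size([4, 4, 3, 3]) torch.Size([4, 4, 4, 4])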
ContrastiveLoss
import torch from numpy.random import * import torch.onnx import torch.nn.functional as F class ContrastiveLoss(torch.nn.Module): def __init__(self, margin=2): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch.mean((1 - label) * torch.pow( euclidean_distance, 2) + label * torch.pow(torch.clamp(self. margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from numpy.random import * import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(torch.nn.Module): def __init__(self, margin=2): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, 
arg1_1, arg2_1]) return output[0]
ioarun/pcb-fault-detection
ContrastiveLoss
false
3,665
[ "MIT" ]
0
d05deb724f86c4f89bdb816c07229bfba6420c14
https://github.com/ioarun/pcb-fault-detection/tree/d05deb724f86c4f89bdb816c07229bfba6420c14
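A quick cross-check of the fused loss above against the closed-form expression it implements (a sketch, assuming CUDA is available): the mean over (1 - y) * d^2 + y * clamp(margin - d, 0)^2, with d = pairwise_distance(o1, o2) and margin = 2.

import torch
import torch.nn.functional as F

crit = ContrastiveLossNew(margin=2)
o1 = torch.rand(4, 4, 4, 4, device='cuda')
o2 = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')            # soft labels, per get_inputs()

d = F.pairwise_distance(o1, o2)                      # reduces the last dimension
ref = torch.mean((1 - y) * d.pow(2) + y * torch.clamp(2 - d, min=0.0).pow(2))
print(torch.allclose(ref, crit(o1, o2, y), atol=1e-5))   # expected: True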
SoftDetectionModule
import torch import torch.nn.functional as F import torch.nn as nn class SoftDetectionModule(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModule, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, batch): b = batch.size(0) batch = F.relu(batch) max_per_sample = torch.max(batch.view(b, -1), dim=1)[0] exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1)) sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [ self.pad] * 4, mode='constant', value=1.0), self. soft_local_max_size, stride=1) local_max_score = exp / sum_exp depth_wise_max = torch.max(batch, dim=1)[0] depth_wise_max_score = batch / depth_wise_max.unsqueeze(1) all_scores = local_max_score * depth_wise_max_score score = torch.max(all_scores, dim=1)[0] score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex // 36 x3 = xindex // 144 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + x3, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tl.store(out_ptr0 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) 
r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp16 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr2 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp31 = tl.load(in_ptr2 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp39 = tl.load(in_ptr2 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 / tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = 9.0 tmp8 = tmp6 * tmp7 tmp9 = tmp5 / tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp2 / tmp18 tmp20 = tmp9 * tmp19 tmp21 = tmp11 / tmp3 tmp22 = tl_math.exp(tmp21) tmp24 = tmp23 * tmp7 tmp25 = tmp22 / tmp24 tmp26 = tmp11 / tmp18 tmp27 = tmp25 * tmp26 tmp28 = triton_helpers.maximum(tmp20, tmp27) tmp29 = tmp14 / tmp3 tmp30 = tl_math.exp(tmp29) tmp32 = tmp31 * tmp7 tmp33 = tmp30 / tmp32 tmp34 = tmp14 / tmp18 tmp35 = tmp33 * tmp34 tmp36 = triton_helpers.maximum(tmp28, tmp35) tmp37 = tmp17 / tmp3 tmp38 = tl_math.exp(tmp37) tmp40 = tmp39 * tmp7 tmp41 = tmp38 / tmp40 tmp42 = tmp17 / tmp18 tmp43 = tmp41 * tmp42 tmp44 = triton_helpers.maximum(tmp36, tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.where(xmask, tmp45, 0) tmp48 = tl.sum(tmp47, 1)[:, None] tmp49 = tmp44 / tmp48 tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_div_exp_relu_1[grid(576)](arg0_1, buf0, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2[grid(256)]( buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_div_exp_max_mul_relu_sum_3[grid(4)](arg0_1, buf0, buf3, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 del buf3 return buf6, class SoftDetectionModuleNew(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModuleNew, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
imelekhov/d2-net
SoftDetectionModule
false
3,666
[ "BSD-3-Clause-Clear" ]
0
68a61797c40a4d6226c1774d84d97c4f493c9955
https://github.com/imelekhov/d2-net/tree/68a61797c40a4d6226c1774d84d97c4f493c9955
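A short sanity-check sketch for the SoftDetectionModule entry (assuming CUDA): the final division by the per-sample sum means every sample's 4x4 score map should sum to roughly 1.

import torch

det = SoftDetectionModuleNew(soft_local_max_size=3)
batch = torch.rand(4, 4, 4, 4, device='cuda')
score = det(batch)
print(score.shape)                     # torch.Size([4, 4, 4])
print(score.view(4, -1).sum(dim=1))    # each entry close to 1.0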
Bilinear
import torch from torch import Tensor from torch import nn as nn import torch.nn.functional as F class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. """ mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class FeedForward(nn.Module): """ ## FFN module source [FeedForward network](https://arxiv.org/abs/2002.05202) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer, compatible with Monte Carlo dropout at inference time * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = MonteCarloDropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class Bilinear(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = FeedForward(d_model, d_ff, dropout, nn.Identity(), True, False, False, False) def forward(self, x: 'torch.Tensor'): return self.ffn(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor from torch import nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. 
""" mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class FeedForward(nn.Module): """ ## FFN module source [FeedForward network](https://arxiv.org/abs/2002.05202) """ def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1, activation=nn.ReLU(), is_gated: 'bool'=False, bias1: 'bool'=True, bias2: 'bool'=True, bias_gate: 'bool'=True): """ * `d_model` is the number of features in a token embedding * `d_ff` is the number of features in the hidden layer of the FFN * `dropout` is dropout probability for the hidden layer, compatible with Monte Carlo dropout at inference time * `is_gated` specifies whether the hidden layer is gated * `bias1` specified whether the first fully connected layer should have a learnable bias * `bias2` specified whether the second fully connected layer should have a learnable bias * `bias_gate` specified whether the fully connected layer for the gate should have a learnable bias """ super().__init__() self.layer1 = nn.Linear(d_model, d_ff, bias=bias1) self.layer2 = nn.Linear(d_ff, d_model, bias=bias2) self.dropout = MonteCarloDropout(dropout) self.activation = activation self.is_gated = is_gated if is_gated: self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate) def forward(self, x: 'torch.Tensor'): g = self.activation(self.layer1(x)) if self.is_gated: x = g * self.linear_v(x) else: x = g x = self.dropout(x) return self.layer2(x) class BilinearNew(nn.Module): def __init__(self, d_model: 'int', d_ff: 'int', dropout: 'float'=0.1): super().__init__() self.ffn = FeedForward(d_model, d_ff, dropout, nn.Identity(), True, False, False, False) def forward(self, input_0): primals_1 = self.ffn.layer1.weight primals_3 = self.ffn.layer2.weight primals_4 = self.ffn.linear_v.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
gdevos010/darts
Bilinear
false
3,667
[ "Apache-2.0" ]
0
96c97c1e241500ae7b91d32bbfa21d811e4a7d71
https://github.com/gdevos010/darts/tree/96c97c1e241500ae7b91d32bbfa21d811e4a7d71
AdaIN
import torch import torch.nn as nn class AdaIN(nn.Module): def __init__(self, style_dim, num_features): super().__init__() self.norm = nn.InstanceNorm2d(num_features, affine=False) self.fc = nn.Linear(style_dim, num_features * 2) def forward(self, x, s): h = self.fc(s) h = h.view(h.size(0), h.size(1), 1, 1) gamma, beta = torch.chunk(h, chunks=2, dim=1) return (1 + gamma) * self.norm(x) + beta def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'style_dim': 4, 'num_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp24 = tmp22 + tmp23 tmp25 = 1.0 tmp26 = tmp24 + tmp25 tmp27 = tmp0 - tmp10 tmp28 = tmp27 * tmp21 tmp29 = tmp26 * tmp28 tmp32 = tmp30 + tmp31 tmp33 = tmp29 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf0 del primals_2 return buf5, primals_3, primals_4, buf1, buf4 class AdaINNew(nn.Module): def __init__(self, style_dim, num_features): super().__init__() self.norm = nn.InstanceNorm2d(num_features, affine=False) self.fc = nn.Linear(style_dim, num_features * 2) def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, 
primals_2, primals_3, primals_4]) return output[0]
innerverz/CodeTemplate
AdaIN
false
3,668
[ "MIT" ]
0
a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
https://github.com/innerverz/CodeTemplate/tree/a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
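An illustrative sketch of the style path in the AdaIN entry (assumptions: CUDA available, the classes above in scope): the fc layer maps each 4-dim style code to 2 * num_features values, chunked into per-channel (gamma, beta) pairs; the instance-normalised content is then scaled by (1 + gamma) and shifted by beta.

import torch

ada = AdaINNew(style_dim=4, num_features=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')    # content features (N, C, H, W)
s = torch.rand(4, 4, device='cuda')          # style codes (N, style_dim)

with torch.no_grad():
    h = ada.fc(s).view(4, 8, 1, 1)           # 2 * num_features values per sample
    gamma, beta = torch.chunk(h, chunks=2, dim=1)
    out = ada(x, s)
print(gamma.shape, beta.shape, out.shape)    # [4, 4, 1, 1] twice, then [4, 4, 4, 4]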
TwoLayerCNN
import torch import torch.nn as nn import torch.nn.functional as F class TwoLayerCNN(nn.Module): def __init__(self, C, M, embedding, channel, mtc_input, *args, **kwargs): super(TwoLayerCNN, self).__init__() self.C = C self.M = M self.embedding = embedding self.mtc_input = C if mtc_input else 1 self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1, bias=False) self.flat_size = M // 2 * C // self.mtc_input * channel self.fc1 = nn.Linear(self.flat_size, embedding) def forward(self, x): N = len(x) x = x.view(-1, self.mtc_input, self.M) x = F.relu(self.conv1(x)) x = F.max_pool1d(x, 2) x = x.view(N, self.flat_size) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'C': 4, 'M': 4, 'embedding': 4, 'channel': 4, 'mtc_input': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.int8) buf3 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(32)](buf1, buf2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf3, (4, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha =1, beta=1, out=buf4) del primals_4 return buf4, primals_2, primals_1, reinterpret_tensor(buf1, (4, 4, 1, 4 ), (16, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 8), (8, 1), 0 ), primals_3, buf5 class TwoLayerCNNNew(nn.Module): def __init__(self, C, M, embedding, channel, mtc_input, *args, **kwargs): super(TwoLayerCNNNew, self).__init__() self.C = C self.M = M self.embedding = embedding self.mtc_input = C if mtc_input else 1 self.conv1 = nn.Conv1d(self.mtc_input, channel, 3, 1, padding=1, bias=False) self.flat_size = M // 2 * C // self.mtc_input * channel self.fc1 = nn.Linear(self.flat_size, embedding) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_1 = input_0 output = call([primals_1, 
primals_2, primals_3, primals_4]) return output[0]
imvladikon/string-embed
TwoLayerCNN
false
3,669
[ "MIT" ]
0
49e5ab0ada37b497dac51974aff16eeac65627a0
https://github.com/imvladikon/string-embed/tree/49e5ab0ada37b497dac51974aff16eeac65627a0
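A shape-tracing sketch for the TwoLayerCNN entry (assuming CUDA), using the hyper-parameters from get_init_inputs: since mtc_input is truthy, the conv sees C=4 input channels, and flat_size = M // 2 * C // mtc_input * channel evaluates left-to-right to ((4 // 2) * 4) // 4 * 4 = 8, matching the (4, 8) fc1 weight asserted in call().

import torch

net = TwoLayerCNNNew(C=4, M=4, embedding=4, channel=4, mtc_input=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')   # (N, C, M), per get_inputs()
with torch.no_grad():
    out = net(x)
print(net.flat_size, out.shape)          # 8 torch.Size([4, 4])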
ResBlk
import math import torch import torch.nn.functional as F import torch.nn as nn class ResBlk(nn.Module): def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize= False, downsample=False): super().__init__() self.actv = actv self.normalize = normalize self.downsample = downsample self.learned_sc = dim_in != dim_out self._build_weights(dim_in, dim_out) def _build_weights(self, dim_in, dim_out): self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1) self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1) if self.normalize: self.norm1 = nn.InstanceNorm2d(dim_in, affine=True) self.norm2 = nn.InstanceNorm2d(dim_in, affine=True) if self.learned_sc: self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False) def _shortcut(self, x): if self.learned_sc: x = self.conv1x1(x) if self.downsample: x = F.avg_pool2d(x, 2) return x def _residual(self, x): if self.normalize: x = self.norm1(x) x = self.actv(x) x = self.conv1(x) if self.downsample: x = F.avg_pool2d(x, 2) if self.normalize: x = self.norm2(x) x = self.actv(x) x = self.conv2(x) return x def forward(self, x): x = self._shortcut(x) + self._residual(x) return x / math.sqrt(2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.7071067811865475 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_convolution_div_2[grid(256)](buf5, primals_1, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf5, primals_2, primals_4, buf0, buf2, buf3 class ResBlkNew(nn.Module): def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), 
normalize= False, downsample=False): super().__init__() self.actv = actv self.normalize = normalize self.downsample = downsample self.learned_sc = dim_in != dim_out self._build_weights(dim_in, dim_out) def _build_weights(self, dim_in, dim_out): self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1) self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1) if self.normalize: self.norm1 = nn.InstanceNorm2d(dim_in, affine=True) self.norm2 = nn.InstanceNorm2d(dim_in, affine=True) if self.learned_sc: self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False) def _shortcut(self, x): if self.learned_sc: x = self.conv1x1(x) if self.downsample: x = F.avg_pool2d(x, 2) return x def _residual(self, x): if self.normalize: x = self.norm1(x) x = self.actv(x) x = self.conv1(x) if self.downsample: x = F.avg_pool2d(x, 2) if self.normalize: x = self.norm2(x) x = self.actv(x) x = self.conv2(x) return x def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
innerverz/CodeTemplate
ResBlk
false
3,670
[ "MIT" ]
0
a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
https://github.com/innerverz/CodeTemplate/tree/a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
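A decomposition sketch for the ResBlk entry (assuming CUDA): with dim_in == dim_out and downsample=False the shortcut is the identity, so the block returns (x + conv2(actv(conv1(actv(x))))) / sqrt(2); the constant 0.7071067811865475 in the fused kernel is exactly that 1/sqrt(2).

import math
import torch

blk = ResBlkNew(dim_in=4, dim_out=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    manual = (x + blk.conv2(blk.actv(blk.conv1(blk.actv(x))))) / math.sqrt(2)
    print(torch.allclose(blk(x), manual, atol=1e-5))   # expected: True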
_GatedResidualNetwork
import torch from torch import Tensor from torch import nn as nn import torch.nn.functional as F class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. """ mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = MonteCarloDropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, 
trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _GatedResidualNetwork(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, x, context=None, residual=None): if residual is None: residual = x if self.input_size != self.output_size and not self.residual: residual = self.resample_norm(residual) x = self.fc1(x) if context is not None: context = self.context(context) x = x + context x = self.elu(x) x = self.fc2(x) x = self.gate_norm(x, residual) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor from torch import nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) 
tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (8, 4), (4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_glu_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_2[grid(64)](buf4, primals_1, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(256)](buf4, primals_1, buf5, buf6, primals_8, primals_9, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 del primals_9 return buf7, primals_1, primals_8, buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf4, primals_6, primals_4 class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. 
""" mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = MonteCarloDropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _ResampleNorm(nn.Module): def __init__(self, input_size: 'int', output_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.output_size = output_size or input_size if self.input_size != self.output_size: self.resample = _TimeDistributedInterpolation(self.output_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.output_size) def forward(self, x: 'torch.Tensor') ->torch.Tensor: if self.input_size != self.output_size: x = self.resample(x) if self.trainable_add: x = x * self.gate(self.mask) * 2.0 output = self.norm(x) return output class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or 
input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output class _GatedResidualNetworkNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int', output_size: 'int', dropout: 'float'=0.1, context_size: 'int'=None, residual: 'bool'=False): super().__init__() self.input_size = input_size self.output_size = output_size self.context_size = context_size self.hidden_size = hidden_size self.dropout = dropout self.residual = residual if self.input_size != self.output_size and not self.residual: residual_size = self.input_size else: residual_size = self.output_size if self.output_size != residual_size: self.resample_norm = _ResampleNorm(residual_size, self.output_size) self.fc1 = nn.Linear(self.input_size, self.hidden_size) self.elu = nn.ELU() if self.context_size is not None: self.context = nn.Linear(self.context_size, self.hidden_size, bias=False) self.fc2 = nn.Linear(self.hidden_size, self.hidden_size) self.init_weights() self.gate_norm = _GateAddNorm(input_size=self.hidden_size, skip_size=self.output_size, hidden_size=self.output_size, dropout=self.dropout, trainable_add=False) def init_weights(self): for name, p in self.named_parameters(): if 'bias' in name: torch.nn.init.zeros_(p) elif 'fc1' in name or 'fc2' in name: torch.nn.init.kaiming_normal_(p, a=0, mode='fan_in', nonlinearity='leaky_relu') elif 'context' in name: torch.nn.init.xavier_uniform_(p) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.gate_norm.glu.fc.weight primals_7 = self.gate_norm.glu.fc.bias primals_8 = self.gate_norm.add_norm.norm.weight primals_9 = self.gate_norm.add_norm.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
gdevos010/darts
_GatedResidualNetwork
false
3671
[ "Apache-2.0" ]
0
96c97c1e241500ae7b91d32bbfa21d811e4a7d71
https://github.com/gdevos010/darts/tree/96c97c1e241500ae7b91d32bbfa21d811e4a7d71
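A minimal usage sketch for the fused module above; a sketch only, assuming a CUDA device, that _GatedResidualNetworkNew and its helper classes from this record are in scope, and sizes inferred from the assert_size_stride checks in call() (all dimensions 4).

import torch

# input, hidden and output size of 4 match the weight shapes asserted in call()
grn = _GatedResidualNetworkNew(input_size=4, hidden_size=4, output_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = grn(x)                       # runs the generated call([...]) path
assert out.shape == (4, 4, 4, 4)   # buf7, the returned tensor, keeps the input shape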
ApplyStyle
import torch import torch.nn as nn class ApplyStyle(nn.Module): """ @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb """ def __init__(self, latent_size, channels): super(ApplyStyle, self).__init__() self.linear = nn.Linear(latent_size, channels * 2) def forward(self, x, latent): style = self.linear(latent) shape = [-1, 2, x.size(1), 1, 1] style = style.view(shape) x = x * (style[:, 0] * 1 + 1.0) + style[:, 1] * 1 return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 + tmp4 tmp7 = tmp0 * tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp10 * tmp4 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x3, tmp12, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(4096)](primals_4, buf0, primals_2, buf1, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class ApplyStyleNew(nn.Module): """ @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb """ def __init__(self, latent_size, channels): super(ApplyStyleNew, self).__init__() self.linear = nn.Linear(latent_size, channels * 2) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
innerverz/CodeTemplate
ApplyStyle
false
3672
[ "MIT" ]
0
a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
https://github.com/innerverz/CodeTemplate/tree/a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
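A short equivalence sketch between the eager ApplyStyle and the fused ApplyStyleNew from this record; a sketch only, assuming a CUDA device and both classes in scope, with shapes taken from get_inputs().

import torch

ref = ApplyStyle(latent_size=4, channels=4).cuda()
fused = ApplyStyleNew(latent_size=4, channels=4).cuda()
fused.load_state_dict(ref.state_dict())          # share the same linear weights
x = torch.rand(64, 4, 4, 4, device='cuda')
latent = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x, latent), ref(x, latent))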
_GateAddNorm
import torch from torch import Tensor from torch import nn as nn import torch.nn.functional as F class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. """ mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = MonteCarloDropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNorm(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = 
_GatedLinearUnit(self.input_size, hidden_size=self. hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, x, skip): output = self.glu(x) output = self.add_norm(output, skip) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor from torch import nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_glu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp4 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 
4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_glu_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0), buf1 class MonteCarloDropout(nn.Dropout): """ Defines Monte Carlo dropout Module as defined in the paper https://arxiv.org/pdf/1506.02142.pdf. In summary, This technique uses the regular dropout which can be interpreted as a Bayesian approximation of a well-known probabilistic model: the Gaussian process. We can treat the many different networks (with different neurons dropped out) as Monte Carlo samples from the space of all available models. This provides mathematical grounds to reason about the model’s uncertainty and, as it turns out, often improves its performance. """ mc_dropout_enabled: 'bool' = False def train(self, mode: 'bool'=True): if mode: self.mc_dropout_enabled = True def forward(self, input: 'Tensor') ->Tensor: return F.dropout(input, self.p, self.mc_dropout_enabled, self.inplace) class _TimeDistributedInterpolation(nn.Module): def __init__(self, output_size: 'int', batch_first: 'bool'=False, trainable: 'bool'=False): super().__init__() self.output_size = output_size self.batch_first = batch_first self.trainable = trainable if self.trainable: self.mask = nn.Parameter(torch.zeros(self.output_size, dtype= torch.float32)) self.gate = nn.Sigmoid() def interpolate(self, x): upsampled = F.interpolate(x.unsqueeze(1), self.output_size, mode= 'linear', align_corners=True).squeeze(1) if self.trainable: upsampled = upsampled * self.gate(self.mask.unsqueeze(0)) * 2.0 return upsampled def forward(self, x): if len(x.size()) <= 2: return self.interpolate(x) x_reshape = x.contiguous().view(-1, x.size(-1)) y = self.interpolate(x_reshape) if self.batch_first: y = y.contiguous().view(x.size(0), -1, y.size(-1)) else: y = y.view(-1, x.size(1), y.size(-1)) return y class _GatedLinearUnit(nn.Module): """Gated Linear Unit""" def __init__(self, input_size: 'int', hidden_size: 'int'=None, dropout: 'float'=None): super().__init__() if dropout is not None: self.dropout = MonteCarloDropout(dropout) else: self.dropout = dropout self.hidden_size = hidden_size or input_size self.fc = nn.Linear(input_size, self.hidden_size * 2) self.init_weights() def init_weights(self): for n, p in self.named_parameters(): if 'bias' in n: torch.nn.init.zeros_(p) elif 'fc' in n: torch.nn.init.xavier_uniform_(p) def forward(self, x): if self.dropout is not None: x = self.dropout(x) x = self.fc(x) x = F.glu(x, dim=-1) return x class _AddNorm(nn.Module): def __init__(self, input_size: 'int', skip_size: 'int'=None, trainable_add: 'bool'=True): super().__init__() self.input_size = input_size self.trainable_add = trainable_add self.skip_size = skip_size or input_size if self.input_size != self.skip_size: self.resample = _TimeDistributedInterpolation(self.input_size, batch_first=True, trainable=False) if self.trainable_add: self.mask = 
nn.Parameter(torch.zeros(self.input_size, dtype= torch.float)) self.gate = nn.Sigmoid() self.norm = nn.LayerNorm(self.input_size) def forward(self, x: 'torch.Tensor', skip: 'torch.Tensor'): if self.input_size != self.skip_size: skip = self.resample(skip) if self.trainable_add: skip = skip * self.gate(self.mask) * 2.0 output = self.norm(x + skip) return output class _GateAddNormNew(nn.Module): def __init__(self, input_size: 'int', hidden_size: 'int'=None, skip_size: 'int'=None, trainable_add: 'bool'=False, dropout: 'float'=None): super().__init__() self.input_size = input_size self.hidden_size = hidden_size or input_size self.skip_size = skip_size or self.hidden_size self.dropout = dropout self.glu = _GatedLinearUnit(self.input_size, hidden_size=self. hidden_size, dropout=self.dropout) self.add_norm = _AddNorm(self.hidden_size, skip_size=self.skip_size, trainable_add=trainable_add) def forward(self, input_0, input_1): primals_1 = self.glu.fc.weight primals_2 = self.glu.fc.bias primals_5 = self.add_norm.norm.weight primals_6 = self.add_norm.norm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
gdevos010/darts
_GateAddNorm
false
3673
[ "Apache-2.0" ]
0
96c97c1e241500ae7b91d32bbfa21d811e4a7d71
https://github.com/gdevos010/darts/tree/96c97c1e241500ae7b91d32bbfa21d811e4a7d71
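The two Triton kernels above fuse the GLU-plus-skip add and the layer norm, so the eager equivalent of the generated graph is LayerNorm(glu(fc(x)) + skip). A sketch of that check, assuming a CUDA device and the record's classes in scope:

import torch
import torch.nn.functional as F

m = _GateAddNormNew(input_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
skip = torch.rand(4, 4, 4, 4, device='cuda')
ref = m.add_norm.norm(F.glu(m.glu.fc(x), dim=-1) + skip)   # eager form of the fused kernels
torch.testing.assert_close(m(x, skip), ref)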
GAT
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] f_1 = h @ self.a1 f_2 = h @ self.a2 e = self.leakyrelu(f_1 + f_2.transpose(0, 1)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.sigmoid(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = tl.load(in_ptr3 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr3 + 1) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp22 = tl.load(in_ptr3 + 2) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp31 = tl.load(in_ptr3 + 3) tmp32 = tl.broadcast_to(tmp31, [XBLOCK]) tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp39 = tl.load(in_ptr5 + x0, xmask) tmp40 = tl.load(in_ptr6 + 0) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp47 = tl.load(in_ptr6 + 1) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp55 = tl.load(in_ptr6 + 2) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp63 = tl.load(in_ptr6 + 3) tmp64 = tl.broadcast_to(tmp63, [XBLOCK]) tmp70 = tl.load(in_ptr7 + 4 * x0, xmask, 
eviction_policy='evict_last').to( tl.int1) tmp71 = tl.load(in_ptr8 + x0, xmask) tmp72 = tl.load(in_ptr9 + 0) tmp73 = tl.broadcast_to(tmp72, [XBLOCK]) tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp79 = tl.load(in_ptr9 + 1) tmp80 = tl.broadcast_to(tmp79, [XBLOCK]) tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp87 = tl.load(in_ptr9 + 2) tmp88 = tl.broadcast_to(tmp87, [XBLOCK]) tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp95 = tl.load(in_ptr9 + 3) tmp96 = tl.broadcast_to(tmp95, [XBLOCK]) tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last' ).to(tl.int1) tmp103 = tl.load(in_ptr11 + x0, xmask) tmp104 = tl.load(in_ptr12 + 0) tmp105 = tl.broadcast_to(tmp104, [XBLOCK]) tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp111 = tl.load(in_ptr12 + 1) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp119 = tl.load(in_ptr12 + 2) tmp120 = tl.broadcast_to(tmp119, [XBLOCK]) tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp127 = tl.load(in_ptr12 + 3) tmp128 = tl.broadcast_to(tmp127, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp6 = 4.0 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp1, tmp5, tmp7) tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp0, tmp8, tmp9) tmp15 = tmp2 + tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = tl.where(tmp11, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp24 = tmp2 + tmp23 tmp25 = tmp24 * tmp6 tmp26 = tl.where(tmp21, tmp24, tmp25) tmp27 = tl.where(tmp20, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp33 = tmp2 + tmp32 tmp34 = tmp33 * tmp6 tmp35 = tl.where(tmp30, tmp33, tmp34) tmp36 = tl.where(tmp29, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp42 = tmp39 + tmp41 tmp43 = tmp42 * tmp6 tmp44 = tl.where(tmp38, tmp42, tmp43) tmp45 = tl.where(tmp0, tmp44, tmp9) tmp49 = tmp39 + tmp48 tmp50 = tmp49 * tmp6 tmp51 = tl.where(tmp46, tmp49, tmp50) tmp52 = tl.where(tmp11, tmp51, tmp9) tmp53 = triton_helpers.maximum(tmp45, tmp52) tmp57 = tmp39 + tmp56 tmp58 = tmp57 * tmp6 tmp59 = tl.where(tmp54, tmp57, tmp58) tmp60 = tl.where(tmp20, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp53, tmp60) tmp65 = tmp39 + tmp64 tmp66 = tmp65 * tmp6 tmp67 = tl.where(tmp62, tmp65, tmp66) tmp68 = tl.where(tmp29, tmp67, tmp9) tmp69 = triton_helpers.maximum(tmp61, tmp68) tmp74 = tmp71 + tmp73 tmp75 = tmp74 * tmp6 tmp76 = tl.where(tmp70, tmp74, tmp75) tmp77 = tl.where(tmp0, tmp76, tmp9) tmp81 = tmp71 + tmp80 tmp82 = tmp81 * tmp6 tmp83 = tl.where(tmp78, tmp81, tmp82) tmp84 = tl.where(tmp11, tmp83, tmp9) tmp85 = triton_helpers.maximum(tmp77, tmp84) tmp89 = tmp71 + tmp88 tmp90 = tmp89 * tmp6 tmp91 = tl.where(tmp86, tmp89, tmp90) tmp92 = tl.where(tmp20, tmp91, tmp9) tmp93 = triton_helpers.maximum(tmp85, tmp92) tmp97 = tmp71 + tmp96 tmp98 = tmp97 * tmp6 tmp99 = tl.where(tmp94, tmp97, tmp98) tmp100 = tl.where(tmp29, tmp99, tmp9) tmp101 = triton_helpers.maximum(tmp93, tmp100) tmp106 = tmp103 + tmp105 tmp107 = tmp106 * tmp6 tmp108 = tl.where(tmp102, tmp106, tmp107) tmp109 = tl.where(tmp0, tmp108, tmp9) tmp113 = tmp103 + tmp112 tmp114 = tmp113 * tmp6 tmp115 = tl.where(tmp110, tmp113, tmp114) tmp116 = tl.where(tmp11, tmp115, tmp9) tmp117 = triton_helpers.maximum(tmp109, tmp116) tmp121 = tmp103 + tmp120 tmp122 = tmp121 * tmp6 tmp123 = tl.where(tmp118, 
tmp121, tmp122) tmp124 = tl.where(tmp20, tmp123, tmp9) tmp125 = triton_helpers.maximum(tmp117, tmp124) tmp129 = tmp103 + tmp128 tmp130 = tmp129 * tmp6 tmp131 = tl.where(tmp126, tmp129, tmp130) tmp132 = tl.where(tmp29, tmp131, tmp9) tmp133 = triton_helpers.maximum(tmp125, tmp132) tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp69, xmask) tl.store(out_ptr2 + x0, tmp101, xmask) tl.store(out_ptr3 + x0, tmp133, xmask) @triton.jit def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp1, tmp4, tmp6) tmp8 = -8999999815811072.0 tmp9 = tl.where(tmp0, tmp7, tmp8) tmp11 = tmp9 - tmp10 tmp12 = tl_math.exp(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tmp16 * tmp5 tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tl.where(tmp0, tmp18, tmp8) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp26 = tmp24 + tmp25 tmp27 = tmp26 * tmp5 tmp28 = tl.where(tmp23, tmp26, tmp27) tmp29 = tl.where(tmp0, tmp28, tmp8) tmp31 = tmp29 - tmp30 tmp32 = tl_math.exp(tmp31) tmp36 = tmp34 + tmp35 tmp37 = tmp36 * tmp5 tmp38 = tl.where(tmp33, tmp36, tmp37) tmp39 = tl.where(tmp0, tmp38, tmp8) tmp41 = tmp39 - tmp40 tmp42 = tl_math.exp(tmp41) tl.store(out_ptr0 + x2, tmp12, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) tl.store(out_ptr2 + x2, tmp32, xmask) tl.store(out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, 
out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 8, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.sigmoid(tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp12, tmp14, tmp15) tmp17 = tmp0 >= tmp10 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.sigmoid(tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp20, tmp22, tmp23) tmp25 = tmp0 >= tmp18 tl.full([1], 16, tl.int64) tmp28 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.sigmoid(tmp28) tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp25, tmp29, tmp30) tmp32 = tl.where(tmp20, tmp24, tmp31) tmp33 = tl.where(tmp12, tmp16, tmp32) tmp34 = tl.where(tmp4, tmp8, tmp33) tl.store(out_ptr0 + x2, tmp34, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 1), (1, 1)) assert_size_stride(primals_8, (4, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, 1), (1, 1)) assert_size_stride(primals_11, (4, 1), (1, 1)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_3, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_gt_1[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_6, out=buf9) del primals_6 buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_7, out=buf10) buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_8, out=buf11) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_9, out=buf17) del primals_9 buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_10, out=buf18) buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_11, out=buf19) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_12, out=buf25) del primals_12 buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_13, out=buf26) buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_14, out=buf27) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf10 del buf11 del buf13 del buf18 del buf19 del buf2 del buf21 del buf26 del buf27 del buf29 del buf5 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.mm(buf7, buf0, out=buf8) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf14 del buf14 extern_kernels.mm(buf15, buf9, out=buf16) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = buf22 del buf22 extern_kernels.mm(buf23, buf17, out=buf24) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16, num_warps=1, num_stages=1) buf32 = buf30 del buf30 extern_kernels.mm(buf31, buf25, out=buf32) buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33, 64, XBLOCK=64, num_warps=1, num_stages=1) return (buf33, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(primals_14, (1, 4), (1, 1), 0), reinterpret_tensor(primals_13, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor( primals_11, (1, 4), (1, 1), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 
0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor( out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) h.size()[0] f_1 = h @ self.a1 f_2 = h @ self.a2 e = self.leakyrelu(f_1 + f_2.transpose(0, 1)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.sigmoid(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_1 = self.attention_0.W primals_3 = self.attention_0.a1 primals_4 = self.attention_0.a2 primals_2 = self.attention_1.W primals_7 = self.attention_1.a1 primals_8 = self.attention_1.a2 primals_5 = self.attention_2.W primals_10 = self.attention_2.a1 primals_11 = self.attention_2.a2 primals_6 = self.attention_3.W primals_13 = self.attention_3.a1 primals_14 = self.attention_3.a2 primals_9 = input_0 primals_12 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
iaongstudio/PaperRobot
GAT
false
3674
[ "MIT" ]
0
d7d2a87822e1fb473e5c72ffc6b83d1022ecd3c1
https://github.com/iaongstudio/PaperRobot/tree/d7d2a87822e1fb473e5c72ffc6b83d1022ecd3c1
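A usage sketch for the fused four-head GAT above; a sketch only, assuming a CUDA device and the record's classes in scope, with sizes from get_init_inputs()/get_inputs(). Note that the generated call() contains no dropout kernels, so it reflects the inference-time graph.

import torch

gat = GATNew(nfeat=4, nhid=4, dropout=0.5, alpha=4, nheads=4).cuda()
x = torch.rand(4, 4, device='cuda')      # node features, as in get_inputs()
adj = torch.rand(4, 4, device='cuda')    # dense adjacency, as in get_inputs()
out = gat(x, adj)
assert out.shape == (4, 16)              # four heads of width 4, concatenated (buf33 in call())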
GLU
import torch import torch.nn.functional as F import torch.nn as nn class GLU(nn.Module): def __init__(self, dim): super(GLU, self).__init__() self.dim = dim def forward(self, x): return F.glu(x, self.dim) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_glu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_glu_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class GLUNew(nn.Module): def __init__(self, dim): super(GLUNew, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ishine/tfm-tts
GLU
false
3675
[ "MIT" ]
0
a964736467851ddec8f8e8933b9550cbe7d7d7eb
https://github.com/ishine/tfm-tts/tree/a964736467851ddec8f8e8933b9550cbe7d7d7eb
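F.glu splits the chosen dimension in half and gates one half with the sigmoid of the other, which is what the single fused kernel computes. A sketch assuming a CUDA device and the record's classes in scope:

import torch

glu = GLUNew(dim=4)                  # split the last dimension (size 4) into two halves
x = torch.rand(4, 4, 4, 4, 4, device='cuda')
out = glu(x)                         # shape (4, 4, 4, 4, 2)
a, b = x.chunk(2, dim=4)
torch.testing.assert_close(out, a * torch.sigmoid(b))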
DownsampleA
import torch import torch.nn as nn class DownsampleA(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleA, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, x): x = self.avg(x) return torch.cat((x, x.mul(0)), 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nIn': 4, 'nOut': 4, 'stride': 2}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 8 x0 = xindex % 2 x1 = xindex // 2 % 2 x3 = xindex // 32 x4 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * (-4 + x2) + 64 * x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp13 * tmp6 tmp15 = 0.0 tmp16 = tmp14 * tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp9, tmp18) tl.store(out_ptr0 + x4, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class DownsampleANew(nn.Module): def __init__(self, nIn, nOut, stride): super(DownsampleANew, self).__init__() assert stride == 2 self.avg = nn.AvgPool2d(kernel_size=1, stride=stride) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
gianlucagiudice/PyCIL
DownsampleA
false
3676
[ "MIT" ]
0
0db88f239b935ea6d0047918a2a55a703f707b04
https://github.com/gianlucagiudice/PyCIL/tree/0db88f239b935ea6d0047918a2a55a703f707b04
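With kernel_size=1 the average pool is plain stride-2 subsampling, and the cat with x.mul(0) appends zero channels; the fused kernel does both in one pass. A sketch assuming a CUDA device and the record's classes in scope:

import torch

down = DownsampleANew(nIn=4, nOut=4, stride=2)
x = torch.rand(4, 4, 4, 4, device='cuda')
out = down(x)
assert out.shape == (4, 8, 2, 2)                           # channels doubled, spatial halved
torch.testing.assert_close(out[:, :4], x[:, :, ::2, ::2])  # first half: subsampled input
assert torch.all(out[:, 4:] == 0).item()                   # second half: zeros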
NAE
import torch import torch.nn as nn class NAE(nn.Module): def __init__(self): super().__init__() def forward(self, pred, gt): diff = torch.abs(pred - gt) loss = torch.mean(torch.abs(diff / gt)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_div_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 / tmp1 tmp5 = tl_math.abs(tmp4) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = 256.0 tmp10 = tmp8 / tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_div_mean_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class NAENew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
j1a0m0e4sNTU/MachineLearning2019
NAE
false
3677
[ "MIT" ]
0
44a7a3387837e53134bcf5eb8fcf95daf4dff48d
https://github.com/j1a0m0e4sNTU/MachineLearning2019/tree/44a7a3387837e53134bcf5eb8fcf95daf4dff48d
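NAE here is the mean normalized absolute error, mean(|pred - gt| / gt); for positive targets the outer abs in the reference code changes nothing, and the single reduction kernel averages over all 256 elements. A sketch assuming a CUDA device and the record's classes in scope:

import torch

nae = NAENew()
pred = torch.rand(4, 4, 4, 4, device='cuda')
gt = torch.rand(4, 4, 4, 4, device='cuda') + 0.5    # keep the denominator away from zero
loss = nae(pred, gt)
torch.testing.assert_close(loss, ((pred - gt).abs() / gt).mean())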
FixedSubnetConv
import math import torch import torch.multiprocessing import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn.functional as F class FixedSubnetConv(nn.Conv2d): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.scores = nn.Parameter(torch.Tensor(self.weight.size())) nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5)) def set_prune_rate(self, prune_rate): self.prune_rate = prune_rate None def set_subnet(self): output = self.clamped_scores().clone() _, idx = self.clamped_scores().flatten().abs().sort() p = int(self.prune_rate * self.clamped_scores().numel()) flat_oup = output.flatten() flat_oup[idx[:p]] = 0 flat_oup[idx[p:]] = 1 self.scores = torch.nn.Parameter(output) self.scores.requires_grad = False def clamped_scores(self): return self.scores.abs() def get_subnet(self): return self.weight * self.scores def forward(self, x): w = self.get_subnet() x = F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.multiprocessing import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf2, primals_1, primals_2, primals_4, buf0 class FixedSubnetConvNew(nn.Conv2d): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.scores = nn.Parameter(torch.Tensor(self.weight.size())) nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5)) def set_prune_rate(self, prune_rate): self.prune_rate = prune_rate None def set_subnet(self): output = self.clamped_scores().clone() _, idx = self.clamped_scores().flatten().abs().sort() p = int(self.prune_rate * self.clamped_scores().numel()) flat_oup = output.flatten() flat_oup[idx[:p]] = 0 flat_oup[idx[p:]] = 1 self.scores = torch.nn.Parameter(output) self.scores.requires_grad = False def clamped_scores(self): return self.scores.abs() def get_subnet(self): return self.weight * self.scores def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = self.scores primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
isamu-isozaki/hidden-networks
FixedSubnetConv
false
3678
[ "Apache-2.0" ]
0
7dcb96a7de43b65ffde176d771f88b5ecedb84ab
https://github.com/isamu-isozaki/hidden-networks/tree/7dcb96a7de43b65ffde176d771f88b5ecedb84ab
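The fused path first materializes the masked weight (weight * scores, one elementwise kernel) and then runs the convolution and bias add, i.e. get_subnet() followed by the usual conv2d. A usage sketch assuming a CUDA device and the record's classes in scope:

import torch

conv = FixedSubnetConvNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = conv(x)
assert out.shape == (4, 4, 1, 1)     # a 4x4 kernel over a 4x4 input with no padding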
LayerNorm
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) x = x * self.gamma.view(1, -1, 1) + self.beta.view(1, -1, 1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = xindex // 64 x5 = xindex % 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-05 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x4, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0, primals_2, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class LayerNormNew(nn.Module): def __init__(self, channels, eps=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ishine/tfm-tts
LayerNorm
false
3,679
[ "MIT" ]
0
a964736467851ddec8f8e8933b9550cbe7d7d7eb
https://github.com/ishine/tfm-tts/tree/a964736467851ddec8f8e8933b9550cbe7d7d7eb
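A minimal eager-mode sketch of what the two generated kernels in this record fuse (triton_poi_fused_mean_sub_0 for the mean and subtraction, triton_poi_fused_add_mean_mul_pow_rsqrt_1 for the variance, rsqrt and affine step). It restates the record's forward pass functionally with the shapes from get_inputs()/get_init_inputs(); the variable names are illustrative and not part of the original source.

import torch

# Normalization over dim 1, as in the record's forward(); x follows get_inputs().
x = torch.rand([4, 4, 4, 4])
gamma = torch.ones(4)    # stands in for self.gamma (channels=4)
beta = torch.zeros(4)    # stands in for self.beta
eps = 1e-05

mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
y = (x - mean) * torch.rsqrt(variance + eps)
y = y * gamma.view(1, -1, 1) + beta.view(1, -1, 1)   # same broadcast as the module
print(y.shape)  # torch.Size([4, 4, 4, 4])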
WMAE
import torch import torch.nn as nn class WMAE(nn.Module): def __init__(self): super().__init__() self.weight = [300, 1, 200] def forward(self, pred, gt): diff = torch.abs(pred - gt) loss = 0 for i in range(3): loss += torch.sum(diff[:, i] * self.weight[i]) loss /= gt.size(0) * sum(self.weight) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 300.0 tmp5 = tmp3 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = 1.0 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp20 = tmp18 - tmp19 tmp21 = tl_math.abs(tmp20) tmp22 = 200.0 tmp23 = tmp21 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 0.0 tmp28 = tmp8 + tmp27 tmp29 = tmp28 + tmp17 tmp30 = tmp29 + tmp26 tmp31 = 0.000499001996007984 tmp32 = tmp30 * tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class WMAENew(nn.Module): def __init__(self): super().__init__() self.weight = [300, 1, 200] def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
j1a0m0e4sNTU/MachineLearning2019
WMAE
false
3,680
[ "MIT" ]
0
44a7a3387837e53134bcf5eb8fcf95daf4dff48d
https://github.com/j1a0m0e4sNTU/MachineLearning2019/tree/44a7a3387837e53134bcf5eb8fcf95daf4dff48d
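A short functional sketch of this record's loss, mainly to make the fused kernel's constant legible: 0.000499001996007984 is 1 / (gt.size(0) * sum(self.weight)) = 1 / (4 * 501) = 1/2004 for the shapes in get_inputs(). Variable names below are illustrative.

import torch

weight = [300, 1, 200]                      # per-channel weights from the record
pred, gt = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])

diff = torch.abs(pred - gt)
loss = sum(torch.sum(diff[:, i] * weight[i]) for i in range(3))
loss = loss / (gt.size(0) * sum(weight))    # 4 * 501 = 2004, i.e. the 1/2004 factor
print(loss)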
ResBlock
import torch import torch.nn as nn def set_activate_layer(types): if types == 'relu': activation = nn.ReLU() elif types == 'lrelu': activation = nn.LeakyReLU(0.2) elif types == 'tanh': activation = nn.Tanh() elif types == 'sig': activation = nn.Sigmoid() elif types == 'none': activation = None else: assert 0, f'Unsupported activation: {types}' return activation def set_norm_layer(norm_type, norm_dim): if norm_type == 'bn': norm = nn.BatchNorm2d(norm_dim) elif norm_type == 'in': norm = nn.InstanceNorm2d(norm_dim) elif norm_type == 'none': norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) return norm class Interpolate(nn.Module): def __init__(self, scale_factor, mode='bilinear'): super(Interpolate, self).__init__() self.interp = nn.functional.interpolate self.scale_factor = scale_factor self.mode = mode def forward(self, x): x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False) return x class ResBlock(nn.Module): def __init__(self, in_c, out_c, scale_factor=1, norm='in', activation= 'lrelu'): super(ResBlock, self).__init__() self.norm1 = set_norm_layer(norm, out_c) self.norm2 = set_norm_layer(norm, out_c) self.activ = set_activate_layer(activation) self.conv1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=3, stride=1, padding=1, bias=False) self.conv2 = nn.Conv2d(in_channels=out_c, out_channels=out_c, kernel_size=3, stride=1, padding=1, bias=False) self.conv1x1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, stride=1, padding=0, bias=False) self.resize = Interpolate(scale_factor=scale_factor) def forward(self, feat): feat1 = self.norm1(feat) feat1 = self.activ(feat1) feat1 = self.conv1(feat1) feat1 = self.resize(feat1) feat1 = self.norm2(feat1) feat1 = self.activ(feat1) feat1 = self.conv2(feat1) feat2 = self.conv1x1(feat) feat2 = self.resize(feat2) return feat1 + feat2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_c': 4, 'out_c': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.2 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp28, xmask) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = triton_helpers.minimum(tmp12, tmp4) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit__unsafe_index_add_leaky_relu_mul_sub_4( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, 
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 4 r1 = rindex % 4 x0 = xindex r3 = rindex tmp0 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + r2, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + r2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x0), xmask, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 4 * tmp4 + 16 * x0), xmask, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 4 * tmp22 + 16 * x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + 4 * tmp22 + 16 * x0), xmask, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK]) tl.where(xmask, tmp32, 0) tmp35 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = tl.full([XBLOCK, 1], 16, tl.int32) tmp40 = tmp39.to(tl.float32) tmp41 = tmp38 / tmp40 tmp42 = tmp32 - tmp41 tmp43 = tmp42 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = tl.where(xmask, tmp44, 0) tmp47 = tl.sum(tmp46, 1)[:, None] tmp48 = 16.0 tmp49 = tmp47 / tmp48 tmp50 = 1e-05 tmp51 = tmp49 + tmp50 tmp52 = libdevice.rsqrt(tmp51) tmp53 = tmp31 - tmp41 tmp54 = tmp53 * tmp52 tmp55 = 0.0 tmp56 = tmp54 > tmp55 tmp57 = 0.2 tmp58 = tmp54 * tmp57 tmp59 = tl.where(tmp56, tmp54, tmp58) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp31, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp52, xmask) tl.store(out_ptr1 + (r3 + 16 * x0), tmp59, xmask) tl.store(out_ptr0 + x0, tmp41, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_out_ptr0 + x4, xmask) tmp20 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 
16 * x2), xmask, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp21 = tmp20 + tmp1 tmp22 = tmp20 < 0 tmp23 = tl.where(tmp22, tmp21, tmp20) tmp24 = tl.load(in_ptr2 + (tmp8 + 4 * tmp23 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (tmp13 + 4 * tmp23 + 16 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp25 - tmp24 tmp27 = tmp26 * tmp16 tmp28 = tmp24 + tmp27 tmp29 = tmp28 - tmp18 tmp31 = tmp29 * tmp30 tmp32 = tmp18 + tmp31 tmp33 = tmp19 + tmp32 tl.store(in_out_ptr0 + x4, tmp33, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_leaky_relu_0[grid(16)]( primals_1, buf3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf6, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf7 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf8, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf9 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf11, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = buf10 del buf10 buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) buf16 = reinterpret_tensor(buf14, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf14 buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__native_batch_norm_legit__unsafe_index_add_leaky_relu_mul_sub_4[ grid(16)](buf12, buf16, buf5, buf7, buf4, buf8, buf9, buf6, buf11, buf13, buf17, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf4 buf18 = extern_kernels.convolution(buf17, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 4, 4), (64, 16, 4, 1)) buf21 = buf18 del buf18 triton_poi_fused__unsafe_index_add_mul_sub_5[grid(256)](buf21, buf5, buf7, buf19, buf8, buf9, buf6, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf19 return (buf21, primals_1, primals_2, primals_3, primals_4, buf3, buf5, buf6, buf7, buf8, buf9, buf11, reinterpret_tensor(buf12, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf16, buf17) def set_activate_layer(types): if types == 'relu': activation = nn.ReLU() elif types == 'lrelu': activation = nn.LeakyReLU(0.2) elif types == 'tanh': activation = nn.Tanh() elif types == 'sig': activation = nn.Sigmoid() elif types == 'none': activation = None else: assert 0, f'Unsupported activation: {types}' return activation def set_norm_layer(norm_type, norm_dim): if norm_type == 'bn': norm = nn.BatchNorm2d(norm_dim) elif norm_type == 'in': norm = nn.InstanceNorm2d(norm_dim) elif norm_type == 'none': norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) return norm class Interpolate(nn.Module): def __init__(self, scale_factor, mode='bilinear'): super(Interpolate, self).__init__() self.interp = nn.functional.interpolate self.scale_factor = scale_factor self.mode = mode def forward(self, x): x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False) return x class ResBlockNew(nn.Module): def __init__(self, in_c, out_c, scale_factor=1, norm='in', activation= 'lrelu'): super(ResBlockNew, self).__init__() self.norm1 = set_norm_layer(norm, out_c) self.norm2 = set_norm_layer(norm, out_c) self.activ = set_activate_layer(activation) self.conv1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=3, stride=1, padding=1, bias=False) self.conv2 = nn.Conv2d(in_channels=out_c, out_channels=out_c, kernel_size=3, stride=1, padding=1, bias=False) self.conv1x1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, stride=1, padding=0, bias=False) self.resize = Interpolate(scale_factor=scale_factor) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv1x1.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
innerverz/CodeTemplate
ResBlock
false
3,681
[ "MIT" ]
0
a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
https://github.com/innerverz/CodeTemplate/tree/a20f5d24b0b79871aa39b5cde33e3bb4d2507d13
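A shape-level sketch of the two branches this record's graph captures: instance norm → LeakyReLU(0.2) → 3x3 conv → bilinear resize → instance norm → LeakyReLU → 3x3 conv on the residual path, and 1x1 conv → resize on the skip path. It uses the defaults from get_init_inputs() (scale_factor=1, norm='in', activation='lrelu') and freshly initialized, untrained convolutions, so only the shapes and data flow are meaningful.

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.rand([4, 4, 4, 4])                          # get_inputs()
norm = nn.InstanceNorm2d(4)                           # stateless, reused for norm1/norm2
act = nn.LeakyReLU(0.2)
conv1 = nn.Conv2d(4, 4, 3, padding=1, bias=False)
conv2 = nn.Conv2d(4, 4, 3, padding=1, bias=False)
conv1x1 = nn.Conv2d(4, 4, 1, bias=False)

feat1 = conv1(act(norm(x)))
feat1 = F.interpolate(feat1, scale_factor=1, mode='bilinear', align_corners=False)
feat1 = conv2(act(norm(feat1)))
feat2 = F.interpolate(conv1x1(x), scale_factor=1, mode='bilinear', align_corners=False)
print((feat1 + feat2).shape)                          # torch.Size([4, 4, 4, 4])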
MSE
import torch import torch.nn as nn class MSE(nn.Module): def __init__(self): super().__init__() def forward(self, pred, gt): loss = torch.mean(torch.pow(pred - gt, 2)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSENew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
j1a0m0e4sNTU/MachineLearning2019
MSE
false
3,682
[ "MIT" ]
0
44a7a3387837e53134bcf5eb8fcf95daf4dff48d
https://github.com/j1a0m0e4sNTU/MachineLearning2019/tree/44a7a3387837e53134bcf5eb8fcf95daf4dff48d
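A brief sketch of the reduction the single fused kernel in this record performs; the 256.0 constant in the kernel is simply the element count of the 4x4x4x4 inputs from get_inputs().

import torch

pred, gt = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
loss = torch.mean(torch.pow(pred - gt, 2))
# Equivalent to summing the squared differences and dividing by numel (256).
assert torch.allclose(loss, ((pred - gt) ** 2).sum() / 256)
print(loss)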
ComplexConv
import torch import torch.nn as nn import torch.utils.data class ComplexConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(ComplexConv, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.padding = padding self.conv_re = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.conv_im = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) def forward(self, x): real = self.conv_re(x[:, 0]) - self.conv_im(x[:, 1]) imaginary = self.conv_re(x[:, 1]) + self.conv_im(x[:, 0]) output = torch.stack((real, imaginary), dim=1) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 - tmp10 tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp17 = tl.load(in_ptr4 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr1 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tmp17 + tmp18 tmp20 = tl.load(in_ptr5 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.load(in_ptr3 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp20 + tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp14, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp13, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 4, 1), 0), primals_2, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 4, 1), 16), primals_4, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 4, 1), 16), primals_2, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 4, 1), 0), primals_4, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1)) buf4 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_stack_0[grid(8)](buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf0 del buf1 del buf2 del buf3 del primals_3 del primals_5 return reinterpret_tensor(buf4, (4, 2, 1, 1), (2, 1, 1, 1), 0 ), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), ( 256, 64, 4, 1), 16) class ComplexConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(ComplexConvNew, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.padding = padding self.conv_re = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) self.conv_im = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, groups= groups, bias=bias) def forward(self, input_0): primals_1 = self.conv_re.weight primals_3 = self.conv_re.bias primals_2 = self.conv_im.weight primals_5 = self.conv_im.bias primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ishine/multiASR
ComplexConv
false
3,683
[ "Apache-2.0" ]
0
991ea2b12ea8ea4a4beeeba42c156e632c389062
https://github.com/ishine/multiASR/tree/991ea2b12ea8ea4a4beeeba42c156e632c389062
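A sketch of the complex cross terms this record's forward() builds before torch.stack: the dim-1 slices x[:, 0] and x[:, 1] act as the real and imaginary parts, and each is passed through both convolutions. Like the captured graph, it relies on Conv2d accepting an unbatched 3-D input; the layers below are freshly initialized, so only the output shape is meaningful.

import torch
import torch.nn as nn

x = torch.rand([4, 4, 4, 4])                 # get_inputs()
conv_re = nn.Conv2d(4, 4, 4)                 # in_channel=4, out_channel=4, kernel_size=4
conv_im = nn.Conv2d(4, 4, 4)

real = conv_re(x[:, 0]) - conv_im(x[:, 1])   # x[:, 0], x[:, 1] are (4, 4, 4) slices
imaginary = conv_re(x[:, 1]) + conv_im(x[:, 0])
out = torch.stack((real, imaginary), dim=1)
print(out.shape)                             # torch.Size([4, 2, 1, 1]), as returned above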
CausalSelfAttention
import math import torch import torch.nn as nn import torch.nn.functional as F class CausalSelfAttention(nn.Module): """ A vanilla multi-head masked self-attention layer with a projection at the end. It is possible to use torch.nn.MultiheadAttention here but I am including an explicit implementation here to show that there is nothing too scary here. """ def __init__(self, n_embd, n_head, attn_pdrop, resid_pdrop): super().__init__() assert n_embd % n_head == 0 self.key = nn.Linear(n_embd, n_embd) self.query = nn.Linear(n_embd, n_embd) self.value = nn.Linear(n_embd, n_embd) self.attn_drop = nn.Dropout(attn_pdrop) self.resid_drop = nn.Dropout(resid_pdrop) self.proj = nn.Linear(n_embd, n_embd) self.n_head = n_head def forward(self, x, attn_mask: 'torch.Tensor'=None, valid_input_mask: 'torch.Tensor'=None, mask_value=-1000000.0): """mask should be a 3D tensor of shape (B, T, T)""" B, T, C = x.size() k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose( 1, 2) q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose( 1, 2) v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose( 1, 2) att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1))) if attn_mask is not None: att = att.masked_fill(attn_mask.unsqueeze(1) == 0, mask_value) if valid_input_mask is not None: att = att.masked_fill(valid_input_mask.unsqueeze(1).unsqueeze(2 ) == 0, mask_value) att = F.softmax(att, dim=-1) att = self.attn_drop(att) y = att @ v y = y.transpose(1, 2).contiguous().view(B, T, C) y = self.resid_drop(self.proj(y)) return y def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_embd': 4, 'n_head': 4, 'attn_pdrop': 0.5, 'resid_pdrop': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_9 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, 
(16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), primals_8, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class CausalSelfAttentionNew(nn.Module): """ A vanilla multi-head masked self-attention layer with a projection at the end. It is possible to use torch.nn.MultiheadAttention here but I am including an explicit implementation here to show that there is nothing too scary here. """ def __init__(self, n_embd, n_head, attn_pdrop, resid_pdrop): super().__init__() assert n_embd % n_head == 0 self.key = nn.Linear(n_embd, n_embd) self.query = nn.Linear(n_embd, n_embd) self.value = nn.Linear(n_embd, n_embd) self.attn_drop = nn.Dropout(attn_pdrop) self.resid_drop = nn.Dropout(resid_pdrop) self.proj = nn.Linear(n_embd, n_embd) self.n_head = n_head def forward(self, input_0): primals_2 = self.key.weight primals_3 = self.key.bias primals_4 = self.query.weight primals_5 = self.query.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_8 = self.proj.weight primals_9 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
itsdaniele/graphtrans
CausalSelfAttention
false
3,684
[ "Apache-2.0" ]
0
9cdf68af725b258deced4424dbcd5942a481ff8d
https://github.com/itsdaniele/graphtrans/tree/9cdf68af725b258deced4424dbcd5942a481ff8d
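A sketch of the head split and scaled dot-product that the clone/softmax/bmm sequence in this record implements, with dropout, masks, and the output projection omitted. With n_embd=4 and n_head=4 the head size is 1, so the 1/sqrt(head_dim) scale is exactly the 1.0 constant seen in the softmax kernels. The Linear weights below are freshly initialized, not the repository's.

import math
import torch
import torch.nn as nn

B, T, C, n_head = 4, 4, 4, 4
x = torch.rand([B, T, C])                    # get_inputs()
key, query, value = nn.Linear(C, C), nn.Linear(C, C), nn.Linear(C, C)

k = key(x).view(B, T, n_head, C // n_head).transpose(1, 2)
q = query(x).view(B, T, n_head, C // n_head).transpose(1, 2)
v = value(x).view(B, T, n_head, C // n_head).transpose(1, 2)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))   # scale = 1.0 here
y = (att.softmax(dim=-1) @ v).transpose(1, 2).contiguous().view(B, T, C)
print(y.shape)                               # torch.Size([4, 4, 4])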
TransformerEncoderLayer
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Attention(Module): """ Obtained from timm: github.com:rwightman/pytorch-image-models """ def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class TransformerEncoderLayer(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(TransformerEncoderLayer, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, src: 'torch.Tensor', *args, **kwargs) ->torch.Tensor: x, w = self.self_attn(self.pre_norm(src)) src = src + self.drop_path(x) src = self.norm1(src) src2 = self.linear2(self.dropout1(self.activation(self.linear1(src)))) src = src + self.drop_path(self.dropout2(src2)) return src, w def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + 
tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (2048, 4), (4, 1)) assert_size_stride(primals_10, (2048,), (1,)) assert_size_stride(primals_11, (4, 2048), (2048, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32 ) triton_poi_fused_gelu_10[grid(32768)](buf16, buf17, 32768, XBLOCK= 256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, buf15, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor( buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Attention(Module): """ Obtained from timm: github.com:rwightman/pytorch-image-models """ def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class TransformerEncoderLayerNew(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(TransformerEncoderLayerNew, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, input_0): primals_1 = self.pre_norm.weight primals_2 = self.pre_norm.bias primals_4 = self.self_attn.qkv.weight primals_5 = self.self_attn.proj.weight primals_6 = self.self_attn.proj.bias primals_9 = self.linear1.weight primals_10 = self.linear1.bias primals_7 = self.norm1.weight primals_8 = self.norm1.bias primals_11 = self.linear2.weight primals_12 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
iliasprc/Compact-Transformers
TransformerEncoderLayer
false
3,685
[ "Apache-2.0" ]
0
31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
https://github.com/iliasprc/Compact-Transformers/tree/31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
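The forward of TransformerEncoderLayerNew above returns a pair: output[0] is the encoded tensor (buf19 in call()) and output[1] is the softmax attention map (buf8). A minimal usage sketch follows; it assumes a CUDA device, and d_model=4, nhead=4 are assumptions chosen to match the tensor shapes asserted inside call().

import torch

layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
src = torch.rand(4, 4, 4, device="cuda")   # (batch, seq, d_model), the shape asserted for primals_3
with torch.no_grad():
    encoded, attn = layer(src)             # forward returns output[0], output[1]
print(encoded.shape)                       # torch.Size([4, 4, 4])
print(attn.shape)                          # torch.Size([4, 4, 4, 4]), per-head attention weights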
Net
import torch import torch.nn as nn class FcCat(nn.Module): def __init__(self, nIn, nOut): super(FcCat, self).__init__() self.fc = nn.Linear(nIn, nOut, bias=False) def forward(self, x): out = torch.cat((x, self.fc(x)), 1) return out class Net(nn.Module): def __init__(self, nFeatures, nHidden1, nHidden2): super(Net, self).__init__() self.l1 = FcCat(nFeatures, nHidden1) self.l2 = FcCat(nFeatures + nHidden1, nHidden2) def forward(self, x): out = self.l1(x) out = self.l2(out) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'nFeatures': 4, 'nHidden1': 4, 'nHidden2': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 x1 = xindex // 8 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8) extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf3) buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0) triton_poi_fused_cat_1[grid(32)](buf2, buf4, 32, XBLOCK=32, num_warps=1, num_stages=1) return buf5, primals_2, buf2, primals_3 class FcCat(nn.Module): def __init__(self, nIn, nOut): super(FcCat, self).__init__() self.fc = nn.Linear(nIn, nOut, bias=False) def forward(self, x): out = torch.cat((x, self.fc(x)), 1) return out class NetNew(nn.Module): def __init__(self, nFeatures, nHidden1, nHidden2): super(NetNew, self).__init__() self.l1 = FcCat(nFeatures, nHidden1) self.l2 = FcCat(nFeatures + nHidden1, nHidden2) def forward(self, input_0): primals_1 = self.l1.fc.weight primals_3 = self.l2.fc.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
huangzsdy/pytorch_basic_learning
Net
false
3,686
[ "Apache-2.0" ]
0
7880bc3fcee1d38623d93fa2a36482ccde0e335a
https://github.com/huangzsdy/pytorch_basic_learning/tree/7880bc3fcee1d38623d93fa2a36482ccde0e335a
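A minimal equivalence sketch for this entry, assuming a CUDA device (the generated kernels are CUDA-only): the eager Net and the lowered NetNew share the same parameter layout, so with copied weights their outputs on the sample input from get_inputs() should agree.

import torch

eager = Net(nFeatures=4, nHidden1=4, nHidden2=4).cuda()      # kwargs from get_init_inputs()
lowered = NetNew(nFeatures=4, nHidden1=4, nHidden2=4).cuda()
lowered.load_state_dict(eager.state_dict())                   # copy the randomly initialised weights

x = torch.rand(4, 4, device="cuda")                           # matches get_inputs()
with torch.no_grad():
    print(torch.allclose(eager(x), lowered(x), atol=1e-5))    # expected: True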
CriticArchitecture
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): """ Initializer function for weights in Pytorch :param layer: number of hidden layers to implement :return: None """ fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class RLModel: def __init__(self, random_seed): np.random.seed(random_seed) torch.manual_seed(random_seed) def copy_weights_from(self, net, tau=0.001): for local_param, ext_param in zip(self.parameters(), net.parameters()): local_param.data.copy_((1 - tau) * local_param.data + tau * ext_param.data) class CriticArchitecture(nn.Module, RLModel): def __init__(self, state_size, action_size, random_seed): """ Neural network used to implement the critic function :param state_size: size of the state (int) :param action_size: size of the action space (int) :param random_seed: seed for the random processes (int) """ super(CriticArchitecture, self).__init__() torch.manual_seed(random_seed) self.fc1 = nn.Linear(state_size, 256) self.fc2 = nn.Linear(256 + action_size, 256) self.fc3 = nn.Linear(256, 256) self.fc4 = nn.Linear(256, 1) self.reset_parameters() def forward(self, x, actions): """ Forward pass of the neural network :param x: states (tensor) :param actions: actions taken (tensor) :return: output of the network (tenso) """ h = F.relu(self.fc1(x)) h = torch.cat([h, actions], dim=1) h = F.relu(self.fc2(h)) h = F.relu(self.fc3(h)) out = self.fc4(h) return out def reset_parameters(self): """ Neural networks weights initalization :return: None """ self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'random_seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1040 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 260 x1 = xindex // 260 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 260, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-256 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (256, 260), (260, 1)) assert_size_stride(primals_6, (256,), (1,)) assert_size_stride(primals_7, (256, 256), (256, 1)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (1, 256), (256, 1)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 260), (260, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(1040)](buf0, primals_2, primals_4, buf1, 1040, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = 
empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (260, 256), ( 1, 260), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(1024)](buf3, primals_6, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_7, (256, 256), ( 1, 256), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_1[grid(1024)](buf5, primals_8, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_8 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, buf5, reinterpret_tensor(primals_9, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = empty_strided_cuda((4, 256), (256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(1024)](buf0, primals_2, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return (buf7, primals_3, buf1, buf3, buf5, primals_9, primals_7, primals_5, buf8) def hidden_init(layer): """ Initializer function for weights in Pytorch :param layer: number of hidden layers to implement :return: None """ fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class RLModel: def __init__(self, random_seed): np.random.seed(random_seed) torch.manual_seed(random_seed) def copy_weights_from(self, net, tau=0.001): for local_param, ext_param in zip(self.parameters(), net.parameters()): local_param.data.copy_((1 - tau) * local_param.data + tau * ext_param.data) class CriticArchitectureNew(nn.Module, RLModel): def __init__(self, state_size, action_size, random_seed): """ Neural network used to implement the critic function :param state_size: size of the state (int) :param action_size: size of the action space (int) :param random_seed: seed for the random processes (int) """ super(CriticArchitectureNew, self).__init__() torch.manual_seed(random_seed) self.fc1 = nn.Linear(state_size, 256) self.fc2 = nn.Linear(256 + action_size, 256) self.fc3 = nn.Linear(256, 256) self.fc4 = nn.Linear(256, 1) self.reset_parameters() def reset_parameters(self): """ Neural networks weights initalization :return: None """ self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_9 = self.fc4.weight primals_10 = self.fc4.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
ivallesp/RL_Tennis
CriticArchitecture
false
3,687
[ "MIT" ]
0
a83933af9c4481d50f735983b4fc3b1f053f71d1
https://github.com/ivallesp/RL_Tennis/tree/a83933af9c4481d50f735983b4fc3b1f053f71d1
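Both CriticArchitecture and CriticArchitectureNew inherit the RLModel mixin, so copy_weights_from is available on either class. The sketch below runs the Polyak-style soft update on the eager critic, which keeps it CPU-only; the sizes come from get_init_inputs(), while the second seed is an arbitrary choice for illustration.

import torch

online = CriticArchitecture(state_size=4, action_size=4, random_seed=4)
target = CriticArchitecture(state_size=4, action_size=4, random_seed=7)
# target <- (1 - tau) * target + tau * online, parameter by parameter
target.copy_weights_from(online, tau=0.001)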
MaskedTransformerEncoderLayer
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class MaskedAttention(Module): def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask_value = -torch.finfo(attn.dtype).max assert mask.shape[-1] == attn.shape[-1 ], 'mask has incorrect dimensions' mask = mask[:, None, :] * mask[:, :, None] mask = mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1) attn.masked_fill_(~mask, mask_value) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class MaskedTransformerEncoderLayer(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. 
""" def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(MaskedTransformerEncoderLayer, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = MaskedAttention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, src: 'torch.Tensor', mask=None, *args, **kwargs ) ->torch.Tensor: src = src + self.drop_path(self.self_attn(self.pre_norm(src), mask)) src = self.norm1(src) src2 = self.linear2(self.dropout1(self.activation(self.linear1(src)))) src = src + self.drop_path(self.dropout2(src2)) return src def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + 
tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (2048, 4), (4, 1)) assert_size_stride(primals_10, (2048,), (1,)) assert_size_stride(primals_11, (4, 2048), (2048, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32 ) triton_poi_fused_gelu_10[grid(32768)](buf16, buf17, 32768, XBLOCK= 256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, buf15, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class MaskedAttention(Module): def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask_value = -torch.finfo(attn.dtype).max assert mask.shape[-1] == attn.shape[-1 ], 'mask has incorrect dimensions' mask = mask[:, None, :] * mask[:, :, None] mask = mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1) attn.masked_fill_(~mask, mask_value) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class MaskedTransformerEncoderLayerNew(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(MaskedTransformerEncoderLayerNew, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = MaskedAttention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, input_0): primals_1 = self.pre_norm.weight primals_2 = self.pre_norm.bias primals_4 = self.self_attn.qkv.weight primals_5 = self.self_attn.proj.weight primals_6 = self.self_attn.proj.bias primals_9 = self.linear1.weight primals_10 = self.linear1.bias primals_7 = self.norm1.weight primals_8 = self.norm1.bias primals_11 = self.linear2.weight primals_12 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
iliasprc/Compact-Transformers
MaskedTransformerEncoderLayer
false
3,688
[ "Apache-2.0" ]
0
31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
https://github.com/iliasprc/Compact-Transformers/tree/31975a0b4469854dfb0e0cbcedd8f0698cf84a7e
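Note the difference between the two forwards in this entry: the eager MaskedTransformerEncoderLayer accepts an optional boolean mask, while MaskedTransformerEncoderLayerNew only takes the source tensor, so the mask branch is not represented in the kernels above. A small sketch, assuming a CUDA device for the lowered path:

import torch

eager = MaskedTransformerEncoderLayer(d_model=4, nhead=4)
src = torch.rand(4, 4, 4)                                # (batch, seq, d_model) from get_inputs()
mask = torch.ones(4, 4, dtype=torch.bool)                # keep every position
with torch.no_grad():
    out_masked = eager(src, mask=mask)                   # masked eager path

lowered = MaskedTransformerEncoderLayerNew(d_model=4, nhead=4).cuda()
with torch.no_grad():
    out_unmasked = lowered(src.cuda())                   # no mask argument exists here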
BCELoss2d
import torch import torch.nn as nn import torch.backends.cudnn import torch.utils.data class BCELoss2d(nn.Module): """ Binary Cross Entropy loss function """ def __init__(self): super(BCELoss2d, self).__init__() self.bce_loss = nn.BCEWithLogitsLoss() def forward(self, logits, labels): logits_flat = logits.view(-1) labels_flat = labels.view(-1) return self.bce_loss(logits_flat, labels_flat) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCELoss2dNew(nn.Module): """ Binary Cross Entropy loss function """ def __init__(self): super(BCELoss2dNew, self).__init__() self.bce_loss = nn.BCEWithLogitsLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
BCELoss2d
false
3,689
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
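Because BCELoss2d only flattens its arguments before nn.BCEWithLogitsLoss, the mean-reduced value matches the functional with-logits loss on the unflattened tensors. A quick check against the lowered class, assuming a CUDA device:

import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4, device="cuda")    # shapes from get_inputs()
labels = torch.rand(4, 4, 4, 4, device="cuda")
loss = BCELoss2dNew()(logits, labels)
ref = F.binary_cross_entropy_with_logits(logits, labels)
print(torch.allclose(loss, ref, atol=1e-6))        # expected: True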
BinaryCrossEntropyLoss2d
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data class BinaryCrossEntropyLoss2d(nn.Module): def __init__(self, weight=None, size_average=True): """ Binary cross entropy loss 2D Args: weight: size_average: """ super(BinaryCrossEntropyLoss2d, self).__init__() self.bce_loss = nn.BCELoss(weight, size_average) def forward(self, logits, targets): probs = F.sigmoid(logits) probs_flat = probs.view(-1) targets_flat = targets.view(-1) return self.bce_loss(probs_flat, targets_flat) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BinaryCrossEntropyLoss2dNew(nn.Module): def __init__(self, weight=None, size_average=True): """ Binary cross entropy loss 2D Args: weight: size_average: """ super(BinaryCrossEntropyLoss2dNew, self).__init__() self.bce_loss = nn.BCELoss(weight, size_average) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
BinaryCrossEntropyLoss2d
false
3,690
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
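Unlike the with-logits formulation, this module applies sigmoid first and then nn.BCELoss, whose internal log clamp shows up as the -100.0 constant in the fused kernel above. With saturating logits the two formulations therefore diverge; a small illustration, assuming a CUDA device:

import torch
import torch.nn.functional as F

logits = torch.full((4, 4, 4, 4), 50.0, device="cuda")        # sigmoid saturates to exactly 1.0 in float32
targets = torch.zeros(4, 4, 4, 4, device="cuda")
clamped = BinaryCrossEntropyLoss2dNew()(logits, targets)      # log(1 - p) hits the -100 clamp, mean ~100
exact = F.binary_cross_entropy_with_logits(logits, targets)   # stable with-logits value, mean ~50
print(clamped.item(), exact.item())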
DiceScore
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data class DiceScore(nn.Module): def __init__(self, threshold=0.5): super(DiceScore, self).__init__() self.threshold = threshold def forward(self, logits, labels): probs = F.sigmoid(logits) num = labels.size(0) predicts = (probs.view(num, -1) > self.threshold).float() labels = labels.view(num, -1) intersection = predicts * labels score = 2.0 * intersection.sum(1) / (predicts.sum(1) + labels.sum(1)) return score.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_gt_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = 0.5 tmp3 = tmp1 > tmp2 tmp4 = tmp3.to(tl.float32) tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) tl.store(out_ptr2 + x0, tmp18, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 / tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = 4.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_gt_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class DiceScoreNew(nn.Module): def __init__(self, threshold=0.5): super(DiceScoreNew, self).__init__() self.threshold = threshold def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
DiceScore
false
3,691
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
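A sanity-check sketch for the lowered DiceScore, assuming a CUDA device: when the thresholded predictions exactly match binary labels, every per-sample score is 1, so the returned mean should be 1.0.

import torch

labels = (torch.arange(4 * 4 * 4 * 4) % 2).float().reshape(4, 4, 4, 4).cuda()
logits = labels * 20.0 - 10.0                 # +10 where the label is 1, -10 where it is 0
score = DiceScoreNew()(logits, labels)
print(score.item())                           # expected: ~1.0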
DisConvModule
import torch import torch.nn as nn from torch.nn.utils import spectral_norm as spectral_norm_fn from torch.nn.utils import weight_norm as weight_norm_fn def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0, rate=1, activation='lrelu', weight_norm='none'): return Conv2dBlock(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation, weight_norm=weight_norm) class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) def forward(self, x): if self.pad: x = self.conv(self.pad(x)) else: x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class DisConvModule(nn.Module): def __init__(self, input_dim, cnum, weight_norm='none', use_cuda=True, device_id=0): super(DisConvModule, self).__init__() self.use_cuda = use_cuda self.device_id = device_id self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2, weight_norm=weight_norm ) self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2, weight_norm=weight_norm) self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2, weight_norm= weight_norm) self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2, weight_norm= weight_norm) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'cnum': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.utils import spectral_norm as spectral_norm_fn from torch.nn.utils import weight_norm as weight_norm_fn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4, 5, 5), (100, 25, 5, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 5, 5), (200, 25, 5, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_9, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = 
buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(32)](buf3, primals_5, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 1, 1), (16, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(64) ](buf7, primals_9, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return (buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8) def dis_conv(input_dim, output_dim, kernel_size=5, stride=2, padding=0, rate=1, activation='lrelu', weight_norm='none'): return Conv2dBlock(input_dim, output_dim, kernel_size, stride, conv_padding=padding, dilation=rate, activation=activation, weight_norm=weight_norm) class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, conv_padding=0, dilation=1, weight_norm='none', norm='none', activation='relu', pad_type='zero', transpose=False): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) elif pad_type == 'none': self.pad = None else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'none': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if weight_norm == 'sn': self.weight_norm = spectral_norm_fn elif weight_norm == 'wn': self.weight_norm = weight_norm_fn elif weight_norm == 'none': self.weight_norm = None else: assert 0, 'Unsupported normalization: {}'.format(weight_norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'elu': self.activation = nn.ELU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if transpose: self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, output_padding= conv_padding, dilation=dilation, bias=self.use_bias) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding=conv_padding, dilation=dilation, bias=self. 
use_bias) if self.weight_norm: self.conv = self.weight_norm(self.conv) def forward(self, x): if self.pad: x = self.conv(self.pad(x)) else: x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x class DisConvModuleNew(nn.Module): def __init__(self, input_dim, cnum, weight_norm='none', use_cuda=True, device_id=0): super(DisConvModuleNew, self).__init__() self.use_cuda = use_cuda self.device_id = device_id self.conv1 = dis_conv(input_dim, cnum, 5, 2, 2, weight_norm=weight_norm ) self.conv2 = dis_conv(cnum, cnum * 2, 5, 2, 2, weight_norm=weight_norm) self.conv3 = dis_conv(cnum * 2, cnum * 4, 5, 2, 2, weight_norm= weight_norm) self.conv4 = dis_conv(cnum * 4, cnum * 4, 5, 2, 2, weight_norm= weight_norm) def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_6 = self.conv3.conv.weight primals_7 = self.conv3.conv.bias primals_8 = self.conv4.conv.weight primals_9 = self.conv4.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
jacobwjs/generative-inpainting-pytorch
DisConvModule
false
3,692
[ "MIT" ]
0
5cd5e818aa7394444b6c21df448d8b395492e4d7
https://github.com/jacobwjs/generative-inpainting-pytorch/tree/5cd5e818aa7394444b6c21df448d8b395492e4d7
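A usage sketch for the lowered discriminator stack, assuming a CUDA device: with input_dim=4 and cnum=4 from get_init_inputs(), the four stride-2 blocks reduce the (4, 4, 4, 4) sample input to (4, 16, 1, 1), the final shape asserted in call().

import torch

disc = DisConvModuleNew(input_dim=4, cnum=4).cuda()
x = torch.rand(4, 4, 4, 4, device="cuda")     # matches get_inputs()
with torch.no_grad():
    print(disc(x).shape)                      # torch.Size([4, 16, 1, 1])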
RelativeMultiHeadAttention
import math import torch import torch.nn.functional as F import torch.nn as nn class RelativeMultiHeadAttention(nn.Module): def __init__(self, channels, num_heads, dropout): super(RelativeMultiHeadAttention, self).__init__() assert channels % num_heads == 0, 'd_model % num_heads should be zero.' self.channels = channels self.inner_channels = channels // num_heads self.num_heads = num_heads self.sqrt_dim = math.sqrt(channels) self.query_proj = nn.Conv1d(channels, channels, 1) self.key_proj = nn.Conv1d(channels, channels, 1) self.value_proj = nn.Conv1d(channels, channels, 1) self.pos_proj = nn.Conv1d(channels, channels, 1, bias=False) self.dropout = nn.Dropout(p=dropout) self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) torch.nn.init.xavier_uniform_(self.u_bias) torch.nn.init.xavier_uniform_(self.v_bias) self.out_proj = nn.Conv1d(channels, channels, 1) def forward(self, query, key, value, pos_embedding, mask=None): B = value.size(0) query = self.query_proj(query).view(B, self.num_heads, self. inner_channels, -1) key = self.key_proj(key).view(B, self.num_heads, self. inner_channels, -1) value = self.value_proj(value).view(B, self.num_heads, self. inner_channels, -1) B_pos = pos_embedding.size(0) pos_emb = self.pos_proj(pos_embedding).view(B_pos, self.num_heads, self.inner_channels, -1) content_score = torch.matmul((query + self.u_bias[None, :, :, None] ).transpose(-1, -2), key) pos_score = torch.matmul((query + self.v_bias[None, :, :, None]). transpose(-1, -2), pos_emb) pos_score = self.rel_shift(pos_score) score = (content_score + pos_score) / self.sqrt_dim if mask is not None: score = score.masked_fill(mask == 0, -10000.0) attn_map = F.softmax(score, -1) attn = self.dropout(attn_map) context = torch.matmul(value, attn) context = context.contiguous().view(B, self.channels, -1) return self.out_proj(context) @staticmethod def rel_shift(x): B, H, T1, T2 = x.size() zero_pad = torch.zeros((B, H, T1, 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(B, H, T2 + 1, T1) x = x_padded[:, :, 1:].view_as(x)[:, :, :, :T2 // 2 + 1] return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'num_heads': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int64) tl.full([1], 0, tl.int64) tmp4 = tmp1 < tmp1 tmp5 = 0.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp1 >= tmp1 tl.full([1], 2, tl.int64) tmp11 = tl.load(in_ptr0 + x0, tmp8 & xmask, other=0.0) tmp12 = tl.where(tmp4, tmp7, tmp11) tmp13 = tmp0 + tmp12 tmp14 = 1.0 tmp15 = tmp13 * tmp14 tmp16 = tmp15 - tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tl_math.exp(tmp18) tmp20 = tmp19 / tmp19 tl.store(in_out_ptr0 + x0, tmp20, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) 
assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_7, (1, 4, 4), (16, 4, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_10, (1, 4, 4), (16, 4, 1), 0), primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 4), (16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(16)](buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 del primals_13 del primals_3 buf5 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf5, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 0, 0), 0), out=buf6) buf8 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 1), 0), out=buf8) buf9 = reinterpret_tensor(buf6, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf6 triton_poi_fused__softmax_add_2[grid(16)](buf9, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf10 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16)](buf10, primals_9, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf11 = buf8 del buf8 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf9, (16, 1, 1), (1, 1, 1), 0), out=buf11) buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0), primals_14, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 1), (4, 1, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_3[grid(16)](buf13, primals_15, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_15 return (buf13, primals_2, primals_5, primals_8, primals_11, primals_14, reinterpret_tensor(primals_4, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_7, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_10, (1, 4, 4), (16, 4, 1), 0), buf9, reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf10, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 1), 0)) class 
RelativeMultiHeadAttentionNew(nn.Module): def __init__(self, channels, num_heads, dropout): super(RelativeMultiHeadAttentionNew, self).__init__() assert channels % num_heads == 0, 'd_model % num_heads should be zero.' self.channels = channels self.inner_channels = channels // num_heads self.num_heads = num_heads self.sqrt_dim = math.sqrt(channels) self.query_proj = nn.Conv1d(channels, channels, 1) self.key_proj = nn.Conv1d(channels, channels, 1) self.value_proj = nn.Conv1d(channels, channels, 1) self.pos_proj = nn.Conv1d(channels, channels, 1, bias=False) self.dropout = nn.Dropout(p=dropout) self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) torch.nn.init.xavier_uniform_(self.u_bias) torch.nn.init.xavier_uniform_(self.v_bias) self.out_proj = nn.Conv1d(channels, channels, 1) @staticmethod def rel_shift(x): B, H, T1, T2 = x.size() zero_pad = torch.zeros((B, H, T1, 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(B, H, T2 + 1, T1) x = x_padded[:, :, 1:].view_as(x)[:, :, :, :T2 // 2 + 1] return x def forward(self, input_0, input_1, input_2, input_3): primals_12 = self.u_bias primals_13 = self.v_bias primals_2 = self.query_proj.weight primals_3 = self.query_proj.bias primals_5 = self.key_proj.weight primals_6 = self.key_proj.bias primals_8 = self.value_proj.weight primals_9 = self.value_proj.bias primals_11 = self.pos_proj.weight primals_14 = self.out_proj.weight primals_15 = self.out_proj.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
ishine/tfm-tts
RelativeMultiHeadAttention
false
3693
[ "MIT" ]
0
a964736467851ddec8f8e8933b9550cbe7d7d7eb
https://github.com/ishine/tfm-tts/tree/a964736467851ddec8f8e8933b9550cbe7d7d7eb
WeightedSoftDiceLoss
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data class WeightedSoftDiceLoss(nn.Module): def __init__(self): super(WeightedSoftDiceLoss, self).__init__() def forward(self, logits, labels, weights): probs = F.sigmoid(logits) num = labels.size(0) w = weights.view(num, -1) w2 = w * w m1 = probs.view(num, -1) m2 = labels.view(num, -1) intersection = m1 * m2 score = 2.0 * ((w2 * intersection).sum(1) + 1) / ((w2 * m1).sum(1) + (w2 * m2).sum(1) + 1) score = 1 - score.sum() / num return score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp4 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp6 = tmp1 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp1 * tmp3 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = tmp1 * tmp4 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) tl.store(out_ptr2 + x0, tmp20, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg2_1, arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class WeightedSoftDiceLossNew(nn.Module): def __init__(self): super(WeightedSoftDiceLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
jayden-chua/image-mask
WeightedSoftDiceLoss
false
3694
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
QuaternionLinear
from torch.nn import Module import torch import numpy as np from numpy.random import RandomState from torch.nn.parameter import Parameter def quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'): if kernel_size is not None: receptive_field = np.prod(kernel_size) fan_in = in_features * receptive_field fan_out = out_features * receptive_field else: fan_in = in_features fan_out = out_features if criterion == 'glorot': s = 1.0 / np.sqrt(2 * (fan_in + fan_out)) elif criterion == 'he': s = 1.0 / np.sqrt(2 * fan_in) else: raise ValueError('Invalid criterion: ' + criterion) rng = RandomState(123) if kernel_size is None: kernel_shape = in_features, out_features elif type(kernel_size) is int: kernel_shape = (out_features, in_features) + tuple((kernel_size,)) else: kernel_shape = (out_features, in_features) + (*kernel_size,) number_of_weights = np.prod(kernel_shape) v_i = np.random.normal(0.0, s, number_of_weights) v_j = np.random.normal(0.0, s, number_of_weights) v_k = np.random.normal(0.0, s, number_of_weights) for i in range(0, number_of_weights): norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001 v_i[i] /= norm v_j[i] /= norm v_k[i] /= norm v_i = v_i.reshape(kernel_shape) v_j = v_j.reshape(kernel_shape) v_k = v_k.reshape(kernel_shape) modulus = rng.uniform(low=-s, high=s, size=kernel_shape) phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape) weight_r = modulus * np.cos(phase) weight_i = modulus * v_i * np.sin(phase) weight_j = modulus * v_j * np.sin(phase) weight_k = modulus * v_k * np.sin(phase) return weight_r, weight_i, weight_j, weight_k def unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'): if kernel_size is not None: receptive_field = np.prod(kernel_size) fan_in = in_features * receptive_field fan_out = out_features * receptive_field else: fan_in = in_features fan_out = out_features if criterion == 'glorot': s = 1.0 / np.sqrt(2 * (fan_in + fan_out)) elif criterion == 'he': s = 1.0 / np.sqrt(2 * fan_in) else: raise ValueError('Invalid criterion: ' + criterion) if kernel_size is None: kernel_shape = in_features, out_features elif type(kernel_size) is int: kernel_shape = (out_features, in_features) + tuple((kernel_size,)) else: kernel_shape = (out_features, in_features) + (*kernel_size,) number_of_weights = np.prod(kernel_shape) v_r = np.random.normal(0.0, s, number_of_weights) v_i = np.random.normal(0.0, s, number_of_weights) v_j = np.random.normal(0.0, s, number_of_weights) v_k = np.random.normal(0.0, s, number_of_weights) for i in range(0, number_of_weights): norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2 ) + 0.0001 v_r[i] /= norm v_i[i] /= norm v_j[i] /= norm v_k[i] /= norm v_r = v_r.reshape(kernel_shape) v_i = v_i.reshape(kernel_shape) v_j = v_j.reshape(kernel_shape) v_k = v_k.reshape(kernel_shape) return v_r, v_i, v_j, v_k def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng, init_criterion): if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size( ) or r_weight.size() != k_weight.size(): raise ValueError( 'The real and imaginary weights should have the same size. Found:' + ' r:' + str(r_weight.size()) + ' i:' + str(i_weight.size()) + ' j:' + str(j_weight.size()) + ' k:' + str(k_weight.size())) elif r_weight.dim() != 2: raise Exception( 'affect_init accepts only matrices. 
Found dimension = ' + str( r_weight.dim())) kernel_size = None r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng, kernel_size, init_criterion) r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j ), torch.from_numpy(k) r_weight.data = r.type_as(r_weight.data) i_weight.data = i.type_as(i_weight.data) j_weight.data = j.type_as(j_weight.data) k_weight.data = k.type_as(k_weight.data) def check_input(input): if input.dim() not in {2, 3}: raise RuntimeError( 'quaternion linear accepts only input of dimension 2 or 3. input.dim = ' + str(input.dim())) nb_hidden = input.size()[-1] if nb_hidden % 4 != 0: raise RuntimeError( 'Quaternion Tensors must be divisible by 4. input.size()[1] = ' + str(nb_hidden)) def get_i(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden // 4, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden // 4, nb_hidden // 4) def get_j(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden // 2, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden // 2, nb_hidden // 4) def get_k(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4) def get_r(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, 0, nb_hidden // 4) elif input.dim() == 3: return input.narrow(2, 0, nb_hidden // 4) class QuaternionLinearFunction(torch.autograd.Function): @staticmethod def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None): ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias) check_input(input) cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, - k_weight), dim=0) cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0) cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, - i_weight), dim=0) cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0) cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k), dim=1) if input.dim() == 2: if bias is not None: return torch.addmm(bias, input, cat_kernels_4_quaternion) else: return torch.mm(input, cat_kernels_4_quaternion) else: output = torch.matmul(input, cat_kernels_4_quaternion) if bias is not None: return output + bias else: return output @staticmethod def backward(ctx, grad_output): input, r_weight, i_weight, j_weight, k_weight, _bias = (ctx. 
saved_tensors) (grad_input) = (grad_weight_r) = (grad_weight_i) = (grad_weight_j) = ( grad_weight_k) = (grad_bias) = None input_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=0) input_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0) input_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=0) input_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0) cat_kernels_4_quaternion_T = torch.cat((input_r, input_i, input_j, input_k), dim=1).permute(1, 0) cat_kernels_4_quaternion_T.requires_grad_(False) r = get_r(input) i = get_i(input) j = get_j(input) k = get_k(input) input_r = torch.cat((r, -i, -j, -k), dim=0) input_i = torch.cat((i, r, -k, j), dim=0) input_j = torch.cat((j, k, r, -i), dim=0) input_k = torch.cat((k, -j, i, r), dim=0) input_mat = torch.cat((input_r, input_i, input_j, input_k), dim=1) input_mat.requires_grad_(False) r = get_r(grad_output) i = get_i(grad_output) j = get_j(grad_output) k = get_k(grad_output) input_r = torch.cat((r, i, j, k), dim=1) input_i = torch.cat((-i, r, k, -j), dim=1) input_j = torch.cat((-j, -k, r, i), dim=1) input_k = torch.cat((-k, j, -i, r), dim=1) grad_mat = torch.cat((input_r, input_i, input_j, input_k), dim=0) if ctx.needs_input_grad[0]: grad_input = grad_output.mm(cat_kernels_4_quaternion_T) if ctx.needs_input_grad[1]: grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0) unit_size_x = r_weight.size(0) unit_size_y = r_weight.size(1) grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(1, 0, unit_size_y) grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y, unit_size_y) grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 2, unit_size_y) grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 3, unit_size_y) if ctx.needs_input_grad[5]: grad_bias = grad_output.sum(0).squeeze(0) return (grad_input, grad_weight_r, grad_weight_i, grad_weight_j, grad_weight_k, grad_bias) class QuaternionLinear(Module): """Applies a quaternion linear transformation to the incoming data. """ def __init__(self, in_features, out_features, bias=True, init_criterion ='glorot', weight_init='quaternion', seed=None): super(QuaternionLinear, self).__init__() self.in_features = in_features // 4 self.out_features = out_features // 4 self.r_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.i_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.j_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.k_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) if bias: self.bias = Parameter(torch.Tensor(self.out_features * 4)) else: self.register_parameter('bias', None) self.init_criterion = init_criterion self.weight_init = weight_init self.seed = seed if seed is not None else np.random.randint(0, 1234) self.rng = RandomState(self.seed) self.reset_parameters() def reset_parameters(self): winit = {'quaternion': quaternion_init, 'unitary': unitary_init}[self .weight_init] if self.bias is not None: self.bias.data.fill_(0) affect_init(self.r_weight, self.i_weight, self.j_weight, self. 
k_weight, winit, self.rng, self.init_criterion) def forward(self, input): if input.dim() == 3: T, N, C = input.size() input = input.contiguous().view(T * N, C) output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias) output = output.view(T, N, output.size(1)) elif input.dim() == 2: output = QuaternionLinearFunction.apply(input, self.r_weight, self.i_weight, self.j_weight, self.k_weight, self.bias) else: raise NotImplementedError return output def __repr__(self): return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None ) + ', init_criterion=' + str(self.init_criterion ) + ', weight_init=' + str(self.weight_init) + ', seed=' + str(self .seed) + ')' def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import numpy as np from numpy.random import RandomState from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp9 = tl.load(in_ptr0 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp16 = tl.load(in_ptr1 + 0) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 0) tmp27 = tl.broadcast_to(tmp26, [XBLOCK]) tmp35 = tl.load(in_ptr3 + 0) tmp36 = tl.broadcast_to(tmp35, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = x1 tmp7 = tmp5 < tmp3 tmp7 & tmp4 tmp11 = tmp5 >= tmp3 tmp12 = tl.full([1], 2, tl.int64) tmp13 = tmp5 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp14 & tmp4 tmp18 = -tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp15, tmp18, tmp19) tmp21 = tmp5 >= tmp12 tmp22 = tl.full([1], 3, tl.int64) tmp23 = tmp5 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tmp24 & tmp4 tmp28 = -tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp25, tmp28, tmp29) tmp31 = tmp5 >= tmp22 tl.full([1], 4, tl.int64) tmp34 = tmp31 & tmp4 tmp37 = -tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp34, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp7, tmp10, tmp41) tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp4, tmp42, tmp43) tmp45 = tmp0 >= tmp3 tmp46 = tmp0 < tmp12 tmp47 = tmp45 & tmp46 tmp7 & tmp47 tmp14 & tmp47 tmp50 = tmp24 & tmp47 tmp51 = tl.where(tmp50, tmp37, tmp38) tmp31 & tmp47 tmp53 = tl.where(tmp24, tmp51, tmp27) tmp54 = tl.where(tmp14, tmp10, tmp53) tmp55 = tl.where(tmp7, tmp17, tmp54) tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype) tmp57 = tl.where(tmp47, tmp55, tmp56) tmp58 = tmp0 >= tmp12 tmp59 = tmp0 < tmp22 tmp60 = tmp58 & tmp59 tmp7 & tmp60 tmp14 & tmp60 tmp24 & tmp60 tmp64 = tmp31 & tmp60 tmp65 = tl.where(tmp64, tmp18, tmp19) tmp66 = tl.where(tmp24, tmp10, tmp65) tmp67 = tl.where(tmp14, tmp36, tmp66) tmp68 = tl.where(tmp7, tmp27, tmp67) tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype) tmp70 = tl.where(tmp60, tmp68, tmp69) tmp71 = tmp0 >= tmp22 tmp7 & tmp71 tmp74 = tmp14 & tmp71 tmp75 = tl.where(tmp74, tmp28, tmp29) tmp24 & tmp71 tmp31 & tmp71 tmp78 = tl.where(tmp24, tmp17, tmp10) tmp79 = tl.where(tmp14, tmp75, tmp78) tmp80 = tl.where(tmp7, tmp36, tmp79) tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype) tmp82 = tl.where(tmp71, tmp80, tmp81) tmp83 = tl.where(tmp60, tmp70, tmp82) tmp84 = tl.where(tmp47, tmp57, tmp83) tmp85 = tl.where(tmp4, tmp44, tmp84) tl.store(out_ptr0 + x2, tmp85, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 1), (1, 1)) assert_size_stride(primals_3, (1, 1), (1, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1, 1), (1, 1)) assert_size_stride(primals_6, (4,), (1,)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](primals_2, primals_3, primals_4, primals_5, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 del primals_4 del primals_5 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_1, buf0, alpha=1, beta=1, out=buf1) del buf0 del primals_6 return buf1, primals_1 def quaternion_init(in_features, out_features, rng, kernel_size=None, criterion='glorot'): if kernel_size is not None: receptive_field = np.prod(kernel_size) fan_in = in_features * receptive_field fan_out = out_features * receptive_field else: fan_in = in_features fan_out = out_features if criterion == 'glorot': s = 1.0 / np.sqrt(2 * (fan_in + fan_out)) elif criterion == 'he': s = 1.0 / np.sqrt(2 * fan_in) else: raise ValueError('Invalid criterion: ' + criterion) rng = RandomState(123) if kernel_size is None: kernel_shape = in_features, out_features elif type(kernel_size) is int: kernel_shape = (out_features, in_features) + tuple((kernel_size,)) else: kernel_shape = (out_features, in_features) + (*kernel_size,) number_of_weights = np.prod(kernel_shape) v_i = np.random.normal(0.0, s, number_of_weights) v_j = np.random.normal(0.0, s, number_of_weights) v_k = np.random.normal(0.0, s, number_of_weights) for i in range(0, number_of_weights): norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001 v_i[i] /= norm v_j[i] /= norm v_k[i] /= norm v_i = v_i.reshape(kernel_shape) v_j = v_j.reshape(kernel_shape) v_k = v_k.reshape(kernel_shape) modulus = rng.uniform(low=-s, high=s, size=kernel_shape) phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape) weight_r = modulus * np.cos(phase) weight_i = modulus * v_i * np.sin(phase) weight_j = modulus * v_j * np.sin(phase) weight_k = modulus * v_k * np.sin(phase) return weight_r, weight_i, weight_j, weight_k def unitary_init(in_features, out_features, rng, kernel_size=None, criterion='he'): if kernel_size is not None: receptive_field = np.prod(kernel_size) fan_in = in_features * receptive_field fan_out = out_features * receptive_field else: fan_in = in_features fan_out = out_features if criterion == 'glorot': s = 1.0 / np.sqrt(2 * (fan_in + fan_out)) elif criterion == 'he': s = 1.0 / np.sqrt(2 * fan_in) else: raise ValueError('Invalid criterion: ' + criterion) if kernel_size is None: kernel_shape = in_features, out_features elif type(kernel_size) is int: kernel_shape = (out_features, in_features) + tuple((kernel_size,)) else: kernel_shape = (out_features, in_features) + (*kernel_size,) number_of_weights = np.prod(kernel_shape) v_r = np.random.normal(0.0, s, number_of_weights) v_i = np.random.normal(0.0, s, number_of_weights) v_j = np.random.normal(0.0, s, number_of_weights) v_k = np.random.normal(0.0, s, number_of_weights) for i in range(0, number_of_weights): norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2 ) + 0.0001 v_r[i] /= norm v_i[i] /= norm v_j[i] /= norm v_k[i] /= norm v_r = v_r.reshape(kernel_shape) v_i = v_i.reshape(kernel_shape) v_j = v_j.reshape(kernel_shape) v_k = v_k.reshape(kernel_shape) return v_r, v_i, v_j, v_k def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng, init_criterion): if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size( ) or r_weight.size() != k_weight.size(): raise ValueError( 'The real and imaginary weights should have the same size. 
Found:' + ' r:' + str(r_weight.size()) + ' i:' + str(i_weight.size()) + ' j:' + str(j_weight.size()) + ' k:' + str(k_weight.size())) elif r_weight.dim() != 2: raise Exception( 'affect_init accepts only matrices. Found dimension = ' + str( r_weight.dim())) kernel_size = None r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng, kernel_size, init_criterion) r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j ), torch.from_numpy(k) r_weight.data = r.type_as(r_weight.data) i_weight.data = i.type_as(i_weight.data) j_weight.data = j.type_as(j_weight.data) k_weight.data = k.type_as(k_weight.data) def check_input(input): if input.dim() not in {2, 3}: raise RuntimeError( 'quaternion linear accepts only input of dimension 2 or 3. input.dim = ' + str(input.dim())) nb_hidden = input.size()[-1] if nb_hidden % 4 != 0: raise RuntimeError( 'Quaternion Tensors must be divisible by 4. input.size()[1] = ' + str(nb_hidden)) def get_i(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden // 4, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden // 4, nb_hidden // 4) def get_j(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden // 2, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden // 2, nb_hidden // 4) def get_k(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4) if input.dim() == 3: return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4) def get_r(input): check_input(input) nb_hidden = input.size()[-1] if input.dim() == 2: return input.narrow(1, 0, nb_hidden // 4) elif input.dim() == 3: return input.narrow(2, 0, nb_hidden // 4) class QuaternionLinearFunction(torch.autograd.Function): @staticmethod def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None): ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight, bias) check_input(input) cat_kernels_4_r = torch.cat((r_weight, -i_weight, -j_weight, - k_weight), dim=0) cat_kernels_4_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0) cat_kernels_4_j = torch.cat((j_weight, k_weight, r_weight, - i_weight), dim=0) cat_kernels_4_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0) cat_kernels_4_quaternion = torch.cat((cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k), dim=1) if input.dim() == 2: if bias is not None: return torch.addmm(bias, input, cat_kernels_4_quaternion) else: return torch.mm(input, cat_kernels_4_quaternion) else: output = torch.matmul(input, cat_kernels_4_quaternion) if bias is not None: return output + bias else: return output @staticmethod def backward(ctx, grad_output): input, r_weight, i_weight, j_weight, k_weight, _bias = (ctx. 
saved_tensors) (grad_input) = (grad_weight_r) = (grad_weight_i) = (grad_weight_j) = ( grad_weight_k) = (grad_bias) = None input_r = torch.cat((r_weight, -i_weight, -j_weight, -k_weight), dim=0) input_i = torch.cat((i_weight, r_weight, -k_weight, j_weight), dim=0) input_j = torch.cat((j_weight, k_weight, r_weight, -i_weight), dim=0) input_k = torch.cat((k_weight, -j_weight, i_weight, r_weight), dim=0) cat_kernels_4_quaternion_T = torch.cat((input_r, input_i, input_j, input_k), dim=1).permute(1, 0) cat_kernels_4_quaternion_T.requires_grad_(False) r = get_r(input) i = get_i(input) j = get_j(input) k = get_k(input) input_r = torch.cat((r, -i, -j, -k), dim=0) input_i = torch.cat((i, r, -k, j), dim=0) input_j = torch.cat((j, k, r, -i), dim=0) input_k = torch.cat((k, -j, i, r), dim=0) input_mat = torch.cat((input_r, input_i, input_j, input_k), dim=1) input_mat.requires_grad_(False) r = get_r(grad_output) i = get_i(grad_output) j = get_j(grad_output) k = get_k(grad_output) input_r = torch.cat((r, i, j, k), dim=1) input_i = torch.cat((-i, r, k, -j), dim=1) input_j = torch.cat((-j, -k, r, i), dim=1) input_k = torch.cat((-k, j, -i, r), dim=1) grad_mat = torch.cat((input_r, input_i, input_j, input_k), dim=0) if ctx.needs_input_grad[0]: grad_input = grad_output.mm(cat_kernels_4_quaternion_T) if ctx.needs_input_grad[1]: grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0) unit_size_x = r_weight.size(0) unit_size_y = r_weight.size(1) grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(1, 0, unit_size_y) grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y, unit_size_y) grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 2, unit_size_y) grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(1, unit_size_y * 3, unit_size_y) if ctx.needs_input_grad[5]: grad_bias = grad_output.sum(0).squeeze(0) return (grad_input, grad_weight_r, grad_weight_i, grad_weight_j, grad_weight_k, grad_bias) class QuaternionLinearNew(Module): """Applies a quaternion linear transformation to the incoming data. """ def __init__(self, in_features, out_features, bias=True, init_criterion ='glorot', weight_init='quaternion', seed=None): super(QuaternionLinearNew, self).__init__() self.in_features = in_features // 4 self.out_features = out_features // 4 self.r_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.i_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.j_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) self.k_weight = Parameter(torch.Tensor(self.in_features, self. out_features)) if bias: self.bias = Parameter(torch.Tensor(self.out_features * 4)) else: self.register_parameter('bias', None) self.init_criterion = init_criterion self.weight_init = weight_init self.seed = seed if seed is not None else np.random.randint(0, 1234) self.rng = RandomState(self.seed) self.reset_parameters() def reset_parameters(self): winit = {'quaternion': quaternion_init, 'unitary': unitary_init}[self .weight_init] if self.bias is not None: self.bias.data.fill_(0) affect_init(self.r_weight, self.i_weight, self.j_weight, self. k_weight, winit, self.rng, self.init_criterion) def __repr__(self): return self.__class__.__name__ + '(' + 'in_features=' + str(self. 
in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None ) + ', init_criterion=' + str(self.init_criterion ) + ', weight_init=' + str(self.weight_init) + ', seed=' + str(self .seed) + ')' def forward(self, input_0): primals_2 = self.r_weight primals_3 = self.i_weight primals_4 = self.j_weight primals_5 = self.k_weight primals_6 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
ispamm/DualQSELD-TCN
QuaternionLinear
false
3695
[ "MIT" ]
0
fc5dc8840b4fdd8cb09f8f92e628561417df268a
https://github.com/ispamm/DualQSELD-TCN/tree/fc5dc8840b4fdd8cb09f8f92e628561417df268a
SoftDiceLoss
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data class SoftDiceLoss(nn.Module): def __init__(self, weight=None, size_average=True): super(SoftDiceLoss, self).__init__() def forward(self, logits, targets): smooth = 1 num = targets.size(0) probs = F.sigmoid(logits) m1 = probs.view(num, -1) m2 = targets.view(num, -1) intersection = m1 * m2 score = 2.0 * (intersection.sum(1) + smooth) / (m1.sum(1) + m2.sum( 1) + smooth) score = 1 - score.sum() / num return score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class SoftDiceLossNew(nn.Module): def __init__(self, weight=None, size_average=True): super(SoftDiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
SoftDiceLoss
false
3696
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
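A minimal usage sketch for the SoftDiceLoss pair above (my own illustration, not part of the dataset record): assuming the SoftDiceLoss and SoftDiceLossNew classes and the get_inputs helper from this record are in scope, and a CUDA device is available (the generated Triton kernels are CUDA-only), the eager and fused implementations should produce the same scalar loss.

import torch

ref = SoftDiceLoss()                      # eager reference from the record
opt = SoftDiceLossNew()                   # Triton-optimised counterpart
logits, targets = [t.cuda() for t in get_inputs()]
# Neither module has parameters, so only the inputs need to be moved to the GPU.
print(torch.allclose(ref(logits, targets), opt(logits, targets), atol=1e-5))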
DiceLoss
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data def dice_loss(preds, trues, weight=None, is_average=True): num = preds.size(0) preds = preds.view(num, -1) trues = trues.view(num, -1) if weight is not None: w = torch.autograd.Variable(weight).view(num, -1) preds = preds * w trues = trues * w intersection = (preds * trues).sum(1) scores = 2.0 * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1) if is_average: score = scores.sum() / num return torch.clamp(score, 0.0, 1.0) else: return scores class DiceLoss(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input, target, weight=None): return 1 - dice_loss(F.sigmoid(input), target, weight=weight, is_average=self.size_average) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_clamp_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = 0.0 tmp16 = triton_helpers.maximum(tmp14, tmp15) tmp17 = triton_helpers.minimum(tmp16, tmp1) tmp18 = tmp1 - tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_clamp_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, def dice_loss(preds, trues, weight=None, is_average=True): num = preds.size(0) preds = preds.view(num, -1) trues = trues.view(num, -1) if weight is not None: w = torch.autograd.Variable(weight).view(num, -1) preds = preds * w trues = trues * w intersection = (preds * trues).sum(1) scores = 2.0 * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1) if is_average: score = scores.sum() / num return torch.clamp(score, 0.0, 1.0) 
else: return scores class DiceLossNew(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
DiceLoss
false
3697
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
Conv3BN
import torch import torch.nn as nn import torch.backends.cudnn import torch.utils.data def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class Conv3BN(nn.Module): def __init__(self, in_: 'int', out: 'int', bn=False): super().__init__() self.conv = conv3x3(in_, out) self.bn = nn.BatchNorm2d(out) if bn else None self.activation = nn.SELU(inplace=True) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_': 4, 'out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class Conv3BNNew(nn.Module): def __init__(self, in_: 'int', out: 'int', bn=False): super().__init__() self.conv = conv3x3(in_, out) self.bn = nn.BatchNorm2d(out) if bn else None self.activation = nn.SELU(inplace=True) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jayden-chua/image-mask
Conv3BN
false
3698
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
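A side note on the fused kernel in the Conv3BN record above (an observation of mine, not part of the record): the literals 1.0507009873554805 and 1.7580993408473766 in triton_poi_fused_convolution_elu_0 are SELU's scale and scale * alpha, so the kernel fuses the bias add with the nn.SELU activation. A quick numeric sketch of that identity:

import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, 7)
scale, alpha = 1.0507009873554805, 1.6732632423543772
# Piecewise form used by the fused kernel: scale * x for x > 0, scale * alpha * expm1(x) otherwise.
manual = torch.where(x > 0, scale * x, scale * alpha * torch.expm1(x))
print(torch.allclose(manual, F.selu(x)))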
WeightedBCELoss2d
import torch import torch.nn as nn import torch.backends.cudnn import torch.utils.data class WeightedBCELoss2d(nn.Module): def __init__(self): super(WeightedBCELoss2d, self).__init__() def forward(self, logits, labels, weights): w = weights.view(-1) logits = logits.view(-1) gt = labels.view(-1) loss = logits.clamp(min=0) - logits * gt + torch.log(1 + torch.exp( -logits.abs())) loss = loss * w loss = loss.sum() / w.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp13 = tl.load(in_ptr2 + r0, None) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = tmp0 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp0) tmp7 = -tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = 1.0 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp5 + tmp11 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = tl.broadcast_to(tmp13, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tmp21 = tmp17 / tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mul_neg_sub_sum_0[grid(1)]( buf2, arg1_1, arg2_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class WeightedBCELoss2dNew(nn.Module): def __init__(self): super(WeightedBCELoss2dNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
jayden-chua/image-mask
WeightedBCELoss2d
false
3699
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
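A short note on the loss formula in the WeightedBCELoss2d record above (my own check, not part of the record): the per-element expression logits.clamp(min=0) - logits * gt + log(1 + exp(-|logits|)) is the numerically stable form of binary cross-entropy with logits, which is what the fused Triton kernel reproduces before applying the weights. A minimal sketch of that equivalence:

import torch
import torch.nn.functional as F

x, y = torch.randn(8), torch.rand(8)
# Stable BCE-with-logits, element-wise, exactly as written in the record's forward pass.
manual = x.clamp(min=0) - x * y + torch.log1p(torch.exp(-x.abs()))
print(torch.allclose(manual, F.binary_cross_entropy_with_logits(x, y, reduction='none')))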
BCEDiceLoss
import torch import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data def dice_loss(preds, trues, weight=None, is_average=True): num = preds.size(0) preds = preds.view(num, -1) trues = trues.view(num, -1) if weight is not None: w = torch.autograd.Variable(weight).view(num, -1) preds = preds * w trues = trues * w intersection = (preds * trues).sum(1) scores = 2.0 * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1) if is_average: score = scores.sum() / num return torch.clamp(score, 0.0, 1.0) else: return scores class DiceLoss(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input, target, weight=None): return 1 - dice_loss(F.sigmoid(input), target, weight=weight, is_average=self.size_average) class BCEDiceLoss(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average self.dice = DiceLoss(size_average=size_average) def forward(self, input, target, weight=None): return nn.modules.loss.BCEWithLogitsLoss(size_average=self. size_average, weight=weight)(input, target) + self.dice(input, target, weight=weight) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mul_rsub_sum_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp13 = tl.load(in_out_ptr0 + 0) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.25 tmp18 = tmp12 * tmp17 tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = triton_helpers.minimum(tmp20, tmp1) tmp22 = tmp1 - tmp21 tmp23 = tmp16 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_with_logits_clamp_div_mul_rsub_sum_2[ grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 del buf3 return buf5, def dice_loss(preds, trues, weight=None, is_average=True): num = preds.size(0) preds = preds.view(num, -1) trues = trues.view(num, -1) if weight is not None: w = torch.autograd.Variable(weight).view(num, -1) preds = preds * w trues = trues * w intersection = (preds * trues).sum(1) scores = 2.0 * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1) if is_average: score = scores.sum() / num return torch.clamp(score, 0.0, 1.0) else: return scores class DiceLoss(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average def forward(self, input, target, weight=None): return 1 - dice_loss(F.sigmoid(input), target, weight=weight, is_average=self.size_average) class BCEDiceLossNew(nn.Module): def __init__(self, size_average=True): super().__init__() self.size_average = size_average self.dice = DiceLoss(size_average=size_average) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jayden-chua/image-mask
BCEDiceLoss
false
3,700
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
DenseCrossEntropy
import torch from torch import nn import torch.functional as F import torch.nn.functional as F class DenseCrossEntropy(nn.Module): def __init__(self): super(DenseCrossEntropy, self).__init__() def forward(self, logits, labels): logits = logits.float() labels = labels.float() logprobs = F.log_softmax(logits, dim=-1) loss = -labels * logprobs loss = loss.sum(-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = -tmp0 tmp3 = tl_math.exp(tmp2) tmp5 = tl_math.exp(tmp4) tmp6 = tmp3 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tl_math.log(tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp1 * tmp14 tmp17 = -tmp16 tmp18 = tmp4 - tmp13 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp22 = -tmp21 tmp23 = tmp7 - tmp13 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp27 = -tmp26 tmp28 = tmp10 - tmp13 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class DenseCrossEntropyNew(nn.Module): def 
__init__(self): super(DenseCrossEntropyNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
grok-phantom/pytorch_tempest
DenseCrossEntropy
false
3,701
[ "MIT" ]
0
37921b5824f9fcb853da3f54d929c4855672416e
https://github.com/grok-phantom/pytorch_tempest/tree/37921b5824f9fcb853da3f54d929c4855672416e
LightHead
import torch from torch import nn class RMSNorm(nn.Module): """An implementation of RMS Normalization. # https://catalyst-team.github.io/catalyst/_modules/catalyst/contrib/nn/modules/rms_norm.html#RMSNorm """ def __init__(self, dimension: 'int', epsilon: 'float'=1e-08, is_bias: 'bool'=False): """ Args: dimension (int): the dimension of the layer output to normalize epsilon (float): an epsilon to prevent dividing by zero in case the layer has zero variance. (default = 1e-8) is_bias (bool): a boolean value whether to include bias term while normalization """ super().__init__() self.dimension = dimension self.epsilon = epsilon self.is_bias = is_bias self.scale = nn.Parameter(torch.ones(self.dimension)) if self.is_bias: self.bias = nn.Parameter(torch.zeros(self.dimension)) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x_std = torch.sqrt(torch.mean(x ** 2, -1, keepdim=True)) x_norm = x / (x_std + self.epsilon) if self.is_bias: return self.scale * x_norm + self.bias return self.scale * x_norm class LightHead(nn.Module): def __init__(self, in_features, num_classes): super().__init__() self.norm = RMSNorm(in_features) self.fc = nn.Linear(in_features, num_classes) def forward(self, x): x = self.norm(x) x = self.fc(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = 1e-08 tmp17 = tmp15 + tmp16 tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_pow_sqrt_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), primals_3 class RMSNorm(nn.Module): """An implementation of RMS Normalization. # https://catalyst-team.github.io/catalyst/_modules/catalyst/contrib/nn/modules/rms_norm.html#RMSNorm """ def __init__(self, dimension: 'int', epsilon: 'float'=1e-08, is_bias: 'bool'=False): """ Args: dimension (int): the dimension of the layer output to normalize epsilon (float): an epsilon to prevent dividing by zero in case the layer has zero variance. 
(default = 1e-8) is_bias (bool): a boolean value whether to include bias term while normalization """ super().__init__() self.dimension = dimension self.epsilon = epsilon self.is_bias = is_bias self.scale = nn.Parameter(torch.ones(self.dimension)) if self.is_bias: self.bias = nn.Parameter(torch.zeros(self.dimension)) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x_std = torch.sqrt(torch.mean(x ** 2, -1, keepdim=True)) x_norm = x / (x_std + self.epsilon) if self.is_bias: return self.scale * x_norm + self.bias return self.scale * x_norm class LightHeadNew(nn.Module): def __init__(self, in_features, num_classes): super().__init__() self.norm = RMSNorm(in_features) self.fc = nn.Linear(in_features, num_classes) def forward(self, input_0): primals_2 = self.norm.scale primals_3 = self.fc.weight primals_4 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
grok-phantom/pytorch_tempest
LightHead
false
3,702
[ "MIT" ]
0
37921b5824f9fcb853da3f54d929c4855672416e
https://github.com/grok-phantom/pytorch_tempest/tree/37921b5824f9fcb853da3f54d929c4855672416e
RelativeSelfAttentionLayer
import math import torch import torch.nn.functional as F import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) x = x * self.gamma.view(1, -1, 1) + self.beta.view(1, -1, 1) return x class RelativeMultiHeadAttention(nn.Module): def __init__(self, channels, num_heads, dropout): super(RelativeMultiHeadAttention, self).__init__() assert channels % num_heads == 0, 'd_model % num_heads should be zero.' self.channels = channels self.inner_channels = channels // num_heads self.num_heads = num_heads self.sqrt_dim = math.sqrt(channels) self.query_proj = nn.Conv1d(channels, channels, 1) self.key_proj = nn.Conv1d(channels, channels, 1) self.value_proj = nn.Conv1d(channels, channels, 1) self.pos_proj = nn.Conv1d(channels, channels, 1, bias=False) self.dropout = nn.Dropout(p=dropout) self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) torch.nn.init.xavier_uniform_(self.u_bias) torch.nn.init.xavier_uniform_(self.v_bias) self.out_proj = nn.Conv1d(channels, channels, 1) def forward(self, query, key, value, pos_embedding, mask=None): B = value.size(0) query = self.query_proj(query).view(B, self.num_heads, self. inner_channels, -1) key = self.key_proj(key).view(B, self.num_heads, self. inner_channels, -1) value = self.value_proj(value).view(B, self.num_heads, self. inner_channels, -1) B_pos = pos_embedding.size(0) pos_emb = self.pos_proj(pos_embedding).view(B_pos, self.num_heads, self.inner_channels, -1) content_score = torch.matmul((query + self.u_bias[None, :, :, None] ).transpose(-1, -2), key) pos_score = torch.matmul((query + self.v_bias[None, :, :, None]). transpose(-1, -2), pos_emb) pos_score = self.rel_shift(pos_score) score = (content_score + pos_score) / self.sqrt_dim if mask is not None: score = score.masked_fill(mask == 0, -10000.0) attn_map = F.softmax(score, -1) attn = self.dropout(attn_map) context = torch.matmul(value, attn) context = context.contiguous().view(B, self.channels, -1) return self.out_proj(context) @staticmethod def rel_shift(x): B, H, T1, T2 = x.size() zero_pad = torch.zeros((B, H, T1, 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(B, H, T2 + 1, T1) x = x_padded[:, :, 1:].view_as(x)[:, :, :, :T2 // 2 + 1] return x class RelativeSelfAttentionLayer(nn.Module): def __init__(self, channels, n_heads, dropout): super(RelativeSelfAttentionLayer, self).__init__() self.norm = LayerNorm(channels) self.mha = RelativeMultiHeadAttention(channels, n_heads, dropout) self.dropout = nn.Dropout(dropout) def forward(self, x, pos_emb, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = self.norm(x) x = self.mha(x, x, x, pos_emb, attn_mask) x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'n_heads': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-05 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_eq_mul_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 0.0 tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x5 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = x5 % 2 tl.full([1], 0, tl.int64) tmp5 = tl.full([1], 1, tl.int64) tmp6 = tmp2 < tmp5 tmp7 = 0.0 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp2 >= tmp5 tl.full([1], 2, tl.int64) tmp13 = tl.load(in_ptr2 + (2 + 4 * x1 + 4 * ((4 + x0) // 8) + 16 * x2 + 16 * ((4 + x0 + 8 * x1) // 32) + x0 // 2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp6, tmp9, tmp13) tmp15 = tmp1 + tmp14 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = -10000.0 tmp19 = tl.where(tmp0, tmp18, tmp17) tmp21 = tmp20 + tmp14 tmp22 = tmp21 * tmp16 tmp23 = tl.where(tmp0, tmp18, tmp22) tmp24 = triton_helpers.maximum(tmp19, tmp23) tmp26 = tmp25 + tmp14 tmp27 = tmp26 * tmp16 tmp28 = tl.where(tmp0, tmp18, tmp27) tmp29 = triton_helpers.maximum(tmp24, tmp28) tmp31 = tmp30 + tmp14 tmp32 = tmp31 * tmp16 tmp33 = tl.where(tmp0, tmp18, tmp32) tmp34 = triton_helpers.maximum(tmp29, tmp33) tmp35 = tmp19 - tmp34 tmp36 = tl_math.exp(tmp35) tmp37 = tmp23 - tmp34 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp40 = tmp28 - tmp34 tmp41 = tl_math.exp(tmp40) tmp42 = tmp39 + tmp41 tmp43 = tmp33 - tmp34 tmp44 = tl_math.exp(tmp43) tmp45 = tmp42 + tmp44 tl.store(out_ptr0 + x5, tmp34, xmask) tl.store(out_ptr1 + x5, tmp45, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 % 16 x5 = xindex % 64 x7 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x8 = xindex // 4 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr3 + x8, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last') tmp2 = x7 // 4 % 4 % 2 tl.full([1], 0, tl.int64) tmp5 = tl.full([1], 1, tl.int64) tmp6 = tmp2 < tmp5 tmp7 = 0.0 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp6, tmp7, tmp8) tmp10 = tmp2 >= tmp5 tl.full([1], 2, 
tl.int64) tmp13 = tl.load(in_ptr2 + (2 + 4 * x2 + 4 * ((4 + x1) // 8) + 16 * x3 + 16 * ((4 + x1 + 8 * x2) // 32) + x1 // 2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp6, tmp9, tmp13) tmp15 = tmp1 + tmp14 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = -10000.0 tmp19 = tl.where(tmp0, tmp18, tmp17) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp24 = tmp22 / tmp23 tl.store(out_ptr0 + x7, tmp24, xmask) @triton.jit def triton_poi_fused_clone_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 1), (1, 1)) assert_size_stride(primals_15, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(16)](buf0, primals_3, primals_4, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 del primals_4 buf2 = extern_kernels.convolution(buf1, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 4), (16, 4, 1)) buf4 = extern_kernels.convolution(buf1, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4), (16, 4, 1)) buf5 = extern_kernels.convolution(reinterpret_tensor(primals_11, (1, 4, 4), (16, 4, 1), 0), primals_12, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) 
assert_size_stride(buf5, (1, 4, 4), (16, 4, 1)) buf6 = reinterpret_tensor(buf0, (1, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_add_2[grid(16)](buf2, primals_6, primals_13, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 buf7 = buf3 del buf3 triton_poi_fused_convolution_3[grid(16)](buf7, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 0), 0 ), reinterpret_tensor(buf7, (4, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(64)](buf2, primals_6, primals_14, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del primals_14 del primals_6 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) triton_poi_fused_eq_mul_5[grid(16)](primals_1, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_6[grid(64)](buf11, buf8, buf10, buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1 ) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_7[grid(256)](buf11, buf8, buf10, buf12, buf13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf10 del buf12 buf15 = reinterpret_tensor(buf8, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf8 triton_poi_fused_clone_8[grid(64)](buf4, primals_10, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 del primals_10 buf16 = reinterpret_tensor(buf13, (16, 1, 4), (4, 4, 1), 0) del buf13 extern_kernels.bmm(reinterpret_tensor(buf15, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), out=buf16 ) buf17 = extern_kernels.convolution(reinterpret_tensor(buf16, (1, 4, 16), (64, 16, 1), 0), primals_15, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf17, (1, 4, 16), (64, 16, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_9[grid(64)](buf18, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 return (buf18, primals_2, primals_5, primals_7, primals_9, primals_12, primals_15, buf1, reinterpret_tensor(primals_11, (1, 4, 4), (16, 4, 1), 0), buf11, buf14, reinterpret_tensor(buf16, (1, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf7, (4, 4, 1), (4, 1, 4), 0)) class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-05): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) x = x * self.gamma.view(1, -1, 1) + self.beta.view(1, -1, 1) return x class RelativeMultiHeadAttention(nn.Module): def __init__(self, channels, num_heads, dropout): super(RelativeMultiHeadAttention, 
self).__init__() assert channels % num_heads == 0, 'd_model % num_heads should be zero.' self.channels = channels self.inner_channels = channels // num_heads self.num_heads = num_heads self.sqrt_dim = math.sqrt(channels) self.query_proj = nn.Conv1d(channels, channels, 1) self.key_proj = nn.Conv1d(channels, channels, 1) self.value_proj = nn.Conv1d(channels, channels, 1) self.pos_proj = nn.Conv1d(channels, channels, 1, bias=False) self.dropout = nn.Dropout(p=dropout) self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self. inner_channels)) torch.nn.init.xavier_uniform_(self.u_bias) torch.nn.init.xavier_uniform_(self.v_bias) self.out_proj = nn.Conv1d(channels, channels, 1) def forward(self, query, key, value, pos_embedding, mask=None): B = value.size(0) query = self.query_proj(query).view(B, self.num_heads, self. inner_channels, -1) key = self.key_proj(key).view(B, self.num_heads, self. inner_channels, -1) value = self.value_proj(value).view(B, self.num_heads, self. inner_channels, -1) B_pos = pos_embedding.size(0) pos_emb = self.pos_proj(pos_embedding).view(B_pos, self.num_heads, self.inner_channels, -1) content_score = torch.matmul((query + self.u_bias[None, :, :, None] ).transpose(-1, -2), key) pos_score = torch.matmul((query + self.v_bias[None, :, :, None]). transpose(-1, -2), pos_emb) pos_score = self.rel_shift(pos_score) score = (content_score + pos_score) / self.sqrt_dim if mask is not None: score = score.masked_fill(mask == 0, -10000.0) attn_map = F.softmax(score, -1) attn = self.dropout(attn_map) context = torch.matmul(value, attn) context = context.contiguous().view(B, self.channels, -1) return self.out_proj(context) @staticmethod def rel_shift(x): B, H, T1, T2 = x.size() zero_pad = torch.zeros((B, H, T1, 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(B, H, T2 + 1, T1) x = x_padded[:, :, 1:].view_as(x)[:, :, :, :T2 // 2 + 1] return x class RelativeSelfAttentionLayerNew(nn.Module): def __init__(self, channels, n_heads, dropout): super(RelativeSelfAttentionLayerNew, self).__init__() self.norm = LayerNorm(channels) self.mha = RelativeMultiHeadAttention(channels, n_heads, dropout) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1, input_2): primals_3 = self.norm.gamma primals_4 = self.norm.beta primals_13 = self.mha.u_bias primals_14 = self.mha.v_bias primals_5 = self.mha.query_proj.weight primals_6 = self.mha.query_proj.bias primals_7 = self.mha.key_proj.weight primals_8 = self.mha.key_proj.bias primals_9 = self.mha.value_proj.weight primals_10 = self.mha.value_proj.bias primals_12 = self.mha.pos_proj.weight primals_15 = self.mha.out_proj.weight primals_16 = self.mha.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_11 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
ishine/tfm-tts
RelativeSelfAttentionLayer
false
3,703
[ "MIT" ]
0
a964736467851ddec8f8e8933b9550cbe7d7d7eb
https://github.com/ishine/tfm-tts/tree/a964736467851ddec8f8e8933b9550cbe7d7d7eb
CosineActivation
import torch from torch import nn def t2v(tau, f, out_features, w, b, w0, b0, arg=None): if arg: v1 = f(torch.matmul(tau, w) + b, arg) else: v1 = f(torch.matmul(tau, w) + b) v2 = torch.matmul(tau, w0) + b0 return torch.cat([v1, v2], 1) class CosineActivation(nn.Module): def __init__(self, in_features, out_features): super(CosineActivation, self).__init__() self.out_features = out_features self.w0 = nn.parameter.Parameter(torch.randn(in_features, 1)) self.b0 = nn.parameter.Parameter(torch.randn(1)) self.w = nn.parameter.Parameter(torch.randn(in_features, out_features - 1)) self.b = nn.parameter.Parameter(torch.randn(out_features - 1)) self.f = torch.cos def forward(self, tau): return t2v(tau, self.f, self.out_features, self.w, self.b, self.w0, self.b0) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_cos_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 3 x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl_math.cos(tmp2) tl.store(out_ptr0 + (x0 + 4 * x1), tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 3), (3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.mm(primals_5, primals_1, out=buf0) del primals_1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 1), (4, 1), 3) extern_kernels.addmm(primals_4, primals_5, primals_3, alpha=1, beta =1, out=buf2) del primals_3 del primals_4 buf3 = reinterpret_tensor(buf4, (4, 3), (4, 1), 0) get_raw_stream(0) triton_poi_fused_add_cos_0[grid(12)](buf0, primals_2, buf3, 12, XBLOCK=16, num_warps=1, num_stages=1) return buf4, primals_2, buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0) def t2v(tau, f, out_features, w, b, w0, b0, arg=None): if arg: v1 = f(torch.matmul(tau, w) + b, arg) else: v1 = f(torch.matmul(tau, w) + b) v2 = torch.matmul(tau, w0) + b0 return torch.cat([v1, v2], 1) class CosineActivationNew(nn.Module): def __init__(self, in_features, out_features): super(CosineActivationNew, self).__init__() self.out_features = out_features self.w0 = nn.parameter.Parameter(torch.randn(in_features, 1)) self.b0 = nn.parameter.Parameter(torch.randn(1)) self.w = nn.parameter.Parameter(torch.randn(in_features, out_features - 1)) self.b = nn.parameter.Parameter(torch.randn(out_features - 1)) self.f = torch.cos def forward(self, input_0): primals_3 = self.w0 primals_4 = self.b0 primals_1 = self.w primals_2 = self.b primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jaredfeng-ca/Time2Vec-PyTorch
CosineActivation
false
3,704
[ "MIT" ]
0
b42205f6721f5a6faf16134e604af28476490d0a
https://github.com/jaredfeng-ca/Time2Vec-PyTorch/tree/b42205f6721f5a6faf16134e604af28476490d0a
UNetModule
import torch import torch.nn as nn import torch.backends.cudnn import torch.utils.data def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class Conv3BN(nn.Module): def __init__(self, in_: 'int', out: 'int', bn=False): super().__init__() self.conv = conv3x3(in_, out) self.bn = nn.BatchNorm2d(out) if bn else None self.activation = nn.SELU(inplace=True) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) x = self.activation(x) return x class UNetModule(nn.Module): def __init__(self, in_: 'int', out: 'int'): super().__init__() self.l1 = Conv3BN(in_, out) self.l2 = Conv3BN(out, out) def forward(self, x): x = self.l1(x) x = self.l2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_': 4, 'out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.backends.cudnn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0507009873554805 tmp6 = tmp2 * tmp5 tmp7 = 1.0 tmp8 = tmp2 * tmp7 tmp9 = libdevice.expm1(tmp8) tmp10 = 1.7580993408473766 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp4, tmp6, tmp11) tl.store(in_out_ptr0 + x3, tmp12, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_elu_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf3 def conv3x3(in_, out): return nn.Conv2d(in_, out, 3, padding=1) class Conv3BN(nn.Module): def __init__(self, in_: 'int', out: 'int', bn=False): super().__init__() self.conv = conv3x3(in_, out) self.bn = nn.BatchNorm2d(out) if bn else None self.activation = nn.SELU(inplace=True) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) x = self.activation(x) return x class UNetModuleNew(nn.Module): def __init__(self, in_: 'int', out: 'int'): super().__init__() self.l1 = Conv3BN(in_, out) self.l2 = Conv3BN(out, out) def forward(self, input_0): primals_1 = self.l1.conv.weight primals_2 = self.l1.conv.bias primals_4 = self.l2.conv.weight primals_5 = self.l2.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jayden-chua/image-mask
UNetModule
false
3,705
[ "MIT" ]
0
ce2c6a32bf13df582e7b57e506d58518258be292
https://github.com/jayden-chua/image-mask/tree/ce2c6a32bf13df582e7b57e506d58518258be292
SineActivation
import torch from torch import nn def t2v(tau, f, out_features, w, b, w0, b0, arg=None): if arg: v1 = f(torch.matmul(tau, w) + b, arg) else: v1 = f(torch.matmul(tau, w) + b) v2 = torch.matmul(tau, w0) + b0 return torch.cat([v1, v2], 1) class SineActivation(nn.Module): def __init__(self, in_features, out_features): super(SineActivation, self).__init__() self.out_features = out_features self.w0 = nn.parameter.Parameter(torch.randn(in_features, 1)) self.b0 = nn.parameter.Parameter(torch.randn(1)) self.w = nn.parameter.Parameter(torch.randn(in_features, out_features - 1)) self.b = nn.parameter.Parameter(torch.randn(out_features - 1)) self.f = torch.sin def forward(self, tau): return t2v(tau, self.f, self.out_features, self.w, self.b, self.w0, self.b0) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 3 x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl_math.sin(tmp2) tl.store(out_ptr0 + (x0 + 4 * x1), tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 3), (3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 1), (1, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3), (3, 1), torch.float32) extern_kernels.mm(primals_5, primals_1, out=buf0) del primals_1 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 1), (4, 1), 3) extern_kernels.addmm(primals_4, primals_5, primals_3, alpha=1, beta =1, out=buf2) del primals_3 del primals_4 buf3 = reinterpret_tensor(buf4, (4, 3), (4, 1), 0) get_raw_stream(0) triton_poi_fused_add_sin_0[grid(12)](buf0, primals_2, buf3, 12, XBLOCK=16, num_warps=1, num_stages=1) return buf4, primals_2, buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0) def t2v(tau, f, out_features, w, b, w0, b0, arg=None): if arg: v1 = f(torch.matmul(tau, w) + b, arg) else: v1 = f(torch.matmul(tau, w) + b) v2 = torch.matmul(tau, w0) + b0 return torch.cat([v1, v2], 1) class SineActivationNew(nn.Module): def __init__(self, in_features, out_features): super(SineActivationNew, self).__init__() self.out_features = out_features self.w0 = nn.parameter.Parameter(torch.randn(in_features, 1)) self.b0 = nn.parameter.Parameter(torch.randn(1)) self.w = nn.parameter.Parameter(torch.randn(in_features, out_features - 1)) self.b = nn.parameter.Parameter(torch.randn(out_features - 1)) self.f = torch.sin def forward(self, input_0): primals_3 = self.w0 primals_4 = self.b0 primals_1 = self.w primals_2 = self.b primals_5 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jaredfeng-ca/Time2Vec-PyTorch
SineActivation
false
3,706
[ "MIT" ]
0
b42205f6721f5a6faf16134e604af28476490d0a
https://github.com/jaredfeng-ca/Time2Vec-PyTorch/tree/b42205f6721f5a6faf16134e604af28476490d0a
PairwiseDistance
import torch import torch.nn as nn class PairwiseDistance(nn.Module): """class for calculating distance Arguments: nn {[type]} -- [description] """ def __init__(self, smooth=0.0001): """Initializer Arguments: smooth {int} -- [description] """ super(PairwiseDistance, self).__init__() self.smooth = smooth def forward(self, x1, x2): """x1, x2 represent input data Arguments: x1 {[type]} -- [description] x2 {[type]} -- [description] Returns: [type] -- [description] """ assert x1.size() == x2.size() diff = torch.abs(x1 - x2) out = torch.pow(diff, 2).sum(dim=1) return torch.pow(out + self.smooth, 0.5) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 0.0001 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tl.store(out_ptr0 + x2, tmp25, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class PairwiseDistanceNew(nn.Module): """class for calculating distance Arguments: nn {[type]} -- [description] """ def __init__(self, smooth=0.0001): """Initializer Arguments: smooth {int} -- [description] """ super(PairwiseDistanceNew, self).__init__() self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jce2090/palmprint-recognition
PairwiseDistance
false
3,707
[ "MIT" ]
0
d2d93c6817afe1b67650dae6516a3d180aaeca38
https://github.com/jce2090/palmprint-recognition/tree/d2d93c6817afe1b67650dae6516a3d180aaeca38
DAImgHead
import torch import torch.nn as nn import torchvision.transforms.functional as F import torch.nn.functional as F import torch.cuda.amp class DAImgHead(nn.Module): """ Add a simple Image-level Domain Classifier head """ def __init__(self, in_channels): """ Arguments: in_channels (int): number of channels of the input feature """ super(DAImgHead, self).__init__() self.conv1_da = nn.Conv2d(in_channels, 512, kernel_size=1, stride=1) self.conv2_da = nn.Conv2d(512, 1, kernel_size=1, stride=1) for l in [self.conv1_da, self.conv2_da]: nn.init.normal_(l.weight, std=0.001) nn.init.constant_(l.bias, 0) def forward(self, x): feature = F.relu(self.conv1_da(x)) feature = self.conv2_da(feature) return feature def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.cuda.amp assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (512, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(32768)](buf2, primals_2, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 4, 4), (16, 1, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 1, 4, 4), (16, 16, 4, 1), 0) del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf4, primals_1, buf0, primals_4, buf2 class DAImgHeadNew(nn.Module): """ Add a simple Image-level Domain Classifier head """ def __init__(self, in_channels): """ Arguments: in_channels (int): number of 
channels of the input feature """ super(DAImgHeadNew, self).__init__() self.conv1_da = nn.Conv2d(in_channels, 512, kernel_size=1, stride=1) self.conv2_da = nn.Conv2d(512, 1, kernel_size=1, stride=1) for l in [self.conv1_da, self.conv2_da]: nn.init.normal_(l.weight, std=0.001) nn.init.constant_(l.bias, 0) def forward(self, input_0): primals_1 = self.conv1_da.weight primals_2 = self.conv1_da.bias primals_4 = self.conv2_da.weight primals_5 = self.conv2_da.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
enpko47/DA-CenterNet
DAImgHead
false
3,708
[ "MIT" ]
0
ef0a99b8ba741fa1dbd66fa58ccae9bf8759ae86
https://github.com/enpko47/DA-CenterNet/tree/ef0a99b8ba741fa1dbd66fa58ccae9bf8759ae86
ShiftSoftplus
import torch import numpy as np from torch.nn import Softplus class ShiftSoftplus(Softplus): """ Shiftsoft plus activation function: 1/beta * (log(1 + exp**(beta * x)) - log(shift)) """ def __init__(self, beta=1, shift=2, threshold=20): super().__init__(beta, threshold) self.shift = shift self.softplus = Softplus(beta, threshold) def forward(self, input): return self.softplus(input) - np.log(float(self.shift)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Softplus assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471805599453 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ShiftSoftplusNew(Softplus): """ Shiftsoft plus activation function: 1/beta * (log(1 + exp**(beta * x)) - log(shift)) """ def __init__(self, beta=1, shift=2, threshold=20): super().__init__(beta, threshold) self.shift = shift self.softplus = Softplus(beta, threshold) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
jeah-z/BDE-FGCN-DFT
ShiftSoftplus
false
3,709
[ "MIT" ]
0
5542544079642a371f08c8c1f356fa235d895194
https://github.com/jeah-z/BDE-FGCN-DFT/tree/5542544079642a371f08c8c1f356fa235d895194
PyTorchMlp
import torch import torch.nn as nn class PyTorchMlp(nn.Module): def __init__(self, n_inputs=4, n_actions=2): nn.Module.__init__(self) self.fc1 = nn.Linear(n_inputs, 512) self.fc2 = nn.Linear(512, 256) self.fc3 = nn.Linear(256, n_actions) self.activ_fn = nn.ReLU() self.out_activ = nn.Softmax(dim=0) def forward(self, x): x = self.activ_fn(self.fc1(x)) x = self.activ_fn(self.fc2(x)) x = self.out_activ(self.fc3(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (96 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (96 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (512, 4), (4, 1)) assert_size_stride(primals_2, (512,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 512), (512, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (2, 256), 
(256, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 512), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32768)](buf1, primals_2, buf8, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 512), (512, 1), 0), reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16384)](buf3, primals_5, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 2), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_2[grid(128)](buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 512), (512, 1), 0 ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf6, primals_6, buf7, primals_4, buf8 class PyTorchMlpNew(nn.Module): def __init__(self, n_inputs=4, n_actions=2): nn.Module.__init__(self) self.fc1 = nn.Linear(n_inputs, 512) self.fc2 = nn.Linear(512, 256) self.fc3 = nn.Linear(256, n_actions) self.activ_fn = nn.ReLU() self.out_activ = nn.Softmax(dim=0) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jasonjabbour/motion_imitation
PyTorchMlp
false
3,710
[ "Apache-2.0" ]
0
a28e7cd9dca2fbdd6823f19db4f66b496dd29144
https://github.com/jasonjabbour/motion_imitation/tree/a28e7cd9dca2fbdd6823f19db4f66b496dd29144
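A minimal smoke-test sketch, assuming the PyTorchMlpNew class defined in this row is in scope and a CUDA device is available (the generated call() launches Triton kernels); the input shape follows the (4, 4, 4, 4) convention used by the rows in this dataset.

import torch

# Hypothetical usage sketch: n_inputs=4 and n_actions=2 match this row's layer sizes.
model = PyTorchMlpNew(n_inputs=4, n_actions=2).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = model(x)
print(out.shape)  # expected: torch.Size([4, 4, 4, 2])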
DNHloss
import torch
import torch.nn as nn


class DNHloss(nn.Module):
    """DNH loss function

    Arguments:
        nn {[type]} -- [description]
    """

    def __init__(self, lamda):
        """Initializer class

        Arguments:
            lamda {[type]} -- [description]
        """
        super(DNHloss, self).__init__()
        self.lamda = lamda

    def forward(self, H, S):
        """Forward H and S

        Arguments:
            H {[type]} -- [description]
            S {[type]} -- [description]

        Returns:
            [type] -- [description]
        """
        theta = H @ H.t() / 2
        metric_loss = (torch.log(1 + torch.exp(-(self.lamda * theta).abs())
            ) + theta.clamp(min=0) - self.lamda * S * theta).mean()
        quantization_loss = self.logcosh(H.abs() - 1).mean()
        loss = metric_loss + self.lamda * quantization_loss
        return loss

    def logcosh(self, x):
        """log cos

        Arguments:
            x {[type]} -- [description]

        Returns:
            [type] -- [description]
        """
        return torch.log(torch.cosh(x))


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'lamda': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + r2, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl_math.abs(tmp4) tmp6 = -tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = 0.0 tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp13 = tmp10 + tmp12 tmp15 = tmp14 * tmp3 tmp16 = tmp15 * tmp2 tmp17 = tmp13 - tmp16 tmp18 = tl.broadcast_to(tmp17, [RBLOCK]) tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) @triton.jit def triton_per_fused_abs_add_clamp_cosh_div_exp_log_mean_mul_neg_sub_1( in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_out_ptr0 + 0) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, 1]) tmp1 = tl_math.abs(tmp0) tmp2 = 1.0 tmp3 = tmp1 - tmp2 tmp4 = libdevice.cosh(tmp3) tmp5 = tl_math.log(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp11 = 256.0 tmp12 = tmp10 / tmp11 tmp13 = 16.0 tmp14 = tmp8 / tmp13 tmp15 = 4.0 tmp16 = tmp14 * tmp15 tmp17 = tmp12 + tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_sub_0[grid(1)]( buf0, arg1_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 buf3 = buf1 del buf1 triton_per_fused_abs_add_clamp_cosh_div_exp_log_mean_mul_neg_sub_1[grid (1)](buf3, arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf3, class DNHlossNew(nn.Module): """DNH loss function Arguments: nn {[type]} -- [description] """ def __init__(self, lamda): """Initializer class Arguments: lamda {[type]} -- [description] """ super(DNHlossNew, self).__init__() self.lamda = lamda def logcosh(self, x): """log cos Arguments: x {[type]} -- [description] Returns: [type] -- [description] """ return 
torch.log(torch.cosh(x)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jce2090/palmprint-recognition
DNHloss
false
3,711
[ "MIT" ]
0
d2d93c6817afe1b67650dae6516a3d180aaeca38
https://github.com/jce2090/palmprint-recognition/tree/d2d93c6817afe1b67650dae6516a3d180aaeca38
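A minimal equivalence-check sketch, assuming the DNHloss and DNHlossNew classes from this row are in scope and a CUDA device is available; shapes and the lamda value follow get_inputs()/get_init_inputs() above.

import torch

# Hypothetical check: the fused Triton kernels should reproduce the eager loss up to float tolerance.
H = torch.rand(4, 4, device='cuda')
S = torch.rand(4, 4, 4, 4, device='cuda')
eager = DNHloss(lamda=4)(H, S)
fused = DNHlossNew(lamda=4)(H, S)
print(eager.item(), torch.allclose(eager, fused, atol=1e-05))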
folder
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel


class folder(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, feature_map):
        N, _, H, W = feature_map.size()
        feature_map = F.unfold(feature_map, kernel_size=3, padding=1)
        feature_map = feature_map.view(N, -1, H, W)
        return feature_map


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_im2col_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl .constexpr, XBLOCK: tl.constexpr): ynumel = 144 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex // 4 y1 = yindex // 3 % 3 x3 = xindex % 4 y0 = yindex % 3 x6 = xindex y2 = yindex // 9 y7 = yindex tmp0 = -1 + x4 + y1 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x3 + y0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x6 + y0 + 4 * y1 + 16 * y2), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x6 + 16 * y7), tmp11, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3, 4, 4), (576, 144, 48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_im2col_0[grid(144, 16)](arg0_1, buf0, 144, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 36, 4, 4), (576, 16, 4, 1), 0), class folderNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
hav4ik/AdelaiDet
folder
false
3,712
[ "BSD-2-Clause" ]
0
6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
https://github.com/hav4ik/AdelaiDet/tree/6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
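A minimal sketch, assuming folderNew from this row is in scope and a CUDA device is available: the Triton im2col kernel is expected to match F.unfold with a 3x3 window and padding 1, reshaped back onto the spatial grid.

import torch
import torch.nn.functional as F

# Hypothetical check of the unfold equivalence on the sample input shape from get_inputs().
x = torch.rand(4, 4, 4, 4, device='cuda')
ref = F.unfold(x, kernel_size=3, padding=1).view(4, -1, 4, 4)
out = folderNew()(x)
print(out.shape, torch.allclose(ref, out))  # torch.Size([4, 36, 4, 4]) True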
TripletMarginLoss
import torch
import torch.nn as nn


class PairwiseDistance(nn.Module):
    """class for calculating distance

    Arguments:
        nn {[type]} -- [description]
    """

    def __init__(self, smooth=0.0001):
        """Initializer

        Arguments:
            smooth {int} -- [description]
        """
        super(PairwiseDistance, self).__init__()
        self.smooth = smooth

    def forward(self, x1, x2):
        """x1, x2 represent input data

        Arguments:
            x1 {[type]} -- [description]
            x2 {[type]} -- [description]

        Returns:
            [type] -- [description]
        """
        assert x1.size() == x2.size()
        diff = torch.abs(x1 - x2)
        out = torch.pow(diff, 2).sum(dim=1)
        return torch.pow(out + self.smooth, 0.5)


class TripletMarginLoss(nn.Module):
    """Triplet loss

    Arguments:
        nn {[type]} -- [description]
    """

    def __init__(self, margin):
        super(TripletMarginLoss, self).__init__()
        self.margin = margin
        self.pdist = PairwiseDistance()

    def forward(self, anchor, positive, negative):
        d_p = self.pdist(anchor, positive)
        d_n = self.pdist(anchor, negative)
        dist_hinge = torch.clamp(self.margin + d_p - d_n, min=0)
        loss = torch.mean(dist_hinge)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'margin': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_mean_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp6 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp11 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp17 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp18 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp28 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp32 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp37 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp42 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 0.0001 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 4.0 tmp27 = tmp25 + tmp26 tmp29 = tmp0 - tmp28 tmp30 = tl_math.abs(tmp29) tmp31 = tmp30 * tmp30 tmp33 = tmp5 - tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp34 * tmp34 tmp36 = tmp31 + tmp35 tmp38 = tmp11 - tmp37 tmp39 = tl_math.abs(tmp38) tmp40 = tmp39 * tmp39 tmp41 = tmp36 + tmp40 tmp43 = tmp17 - tmp42 tmp44 = tl_math.abs(tmp43) tmp45 = tmp44 * tmp44 tmp46 = tmp41 + tmp45 tmp47 = tmp46 + tmp23 tmp48 = libdevice.sqrt(tmp47) tmp49 = tmp27 - tmp48 tmp50 = 0.0 tmp51 = triton_helpers.maximum(tmp49, tmp50) tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK]) tmp54 = tl.sum(tmp52, 1)[:, None] tmp55 = 64.0 tmp56 = tmp54 / tmp55 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_abs_add_clamp_mean_pow_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class PairwiseDistance(nn.Module): """class for calculating distance Arguments: nn {[type]} -- [description] """ def __init__(self, smooth=0.0001): """Initializer Arguments: smooth {int} -- [description] """ super(PairwiseDistance, self).__init__() self.smooth = smooth def forward(self, x1, x2): """x1, x2 represent input data Arguments: x1 {[type]} -- [description] x2 {[type]} -- [description] 
Returns: [type] -- [description] """ assert x1.size() == x2.size() diff = torch.abs(x1 - x2) out = torch.pow(diff, 2).sum(dim=1) return torch.pow(out + self.smooth, 0.5) class TripletMarginLossNew(nn.Module): """Triplet loss Arguments: nn {[type]} -- [description] """ def __init__(self, margin): super(TripletMarginLossNew, self).__init__() self.margin = margin self.pdist = PairwiseDistance() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
jce2090/palmprint-recognition
TripletMarginLoss
false
3,713
[ "MIT" ]
0
d2d93c6817afe1b67650dae6516a3d180aaeca38
https://github.com/jce2090/palmprint-recognition/tree/d2d93c6817afe1b67650dae6516a3d180aaeca38
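A minimal equivalence-check sketch, assuming TripletMarginLoss and TripletMarginLossNew from this row are in scope and a CUDA device is available; margin and input shapes follow get_init_inputs()/get_inputs() above.

import torch

# Hypothetical check: the single fused reduction should match the eager anchor/positive/negative pipeline.
anchor = torch.rand(4, 4, 4, 4, device='cuda')
positive = torch.rand(4, 4, 4, 4, device='cuda')
negative = torch.rand(4, 4, 4, 4, device='cuda')
eager = TripletMarginLoss(margin=4)(anchor, positive, negative)
fused = TripletMarginLossNew(margin=4)(anchor, positive, negative)
print(torch.allclose(eager, fused, atol=1e-05))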
NeuralNerwork
import torch
import torch.nn as nn
import torch.nn.functional as F


class NeuralNerwork(nn.Module):

    def __init__(self, n_features, n_targets):
        super(NeuralNerwork, self).__init__()
        self.fc1 = nn.Linear(n_features, 15)
        self.fc2 = nn.Linear(15, 10)
        self.fc3 = nn.Linear(10, n_targets)
        self.dropout = nn.Dropout(0.2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        return self.sigmoid(self.fc3(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_features': 4, 'n_targets': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 960 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 15 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (15, 4), (4, 1)) assert_size_stride(primals_2, (15,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (10, 15), (15, 1)) assert_size_stride(primals_5, (10,), (1,)) assert_size_stride(primals_6, (4, 10), (10, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 15), (15, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 15), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 15), (240, 60, 15, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 15), (240, 60, 15, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(960)](buf1, primals_2, buf7, 960, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 15), (15, 1), 0), reinterpret_tensor(primals_4, (15, 10), (1, 15), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(640)](buf3, primals_5, buf6, 640, XBLOCK=256, num_warps=4, num_stages=1) 
del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 10), (10, 1), 0), reinterpret_tensor(primals_6, (10, 4), (1, 10), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(256)](buf5, primals_7, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 15), (15, 1), 0), reinterpret_tensor( buf3, (64, 10), (10, 1), 0), buf5, primals_6, buf6, primals_4, buf7 class NeuralNerworkNew(nn.Module): def __init__(self, n_features, n_targets): super(NeuralNerworkNew, self).__init__() self.fc1 = nn.Linear(n_features, 15) self.fc2 = nn.Linear(15, 10) self.fc3 = nn.Linear(10, n_targets) self.dropout = nn.Dropout(0.2) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
jf20541/NeuralNetworks
NeuralNerwork
false
3,714
[ "MIT" ]
0
ee36b734880f30d9e8691205dadcd074795bdff3
https://github.com/jf20541/NeuralNetworks/tree/ee36b734880f30d9e8691205dadcd074795bdff3
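A minimal comparison sketch, assuming NeuralNerwork and NeuralNerworkNew from this row are in scope and a CUDA device is available. The generated call() never applies dropout, so the reference module is put in eval() mode and given the same weights before comparing.

import torch

# Hypothetical check: share weights, disable dropout, compare outputs on the sample input shape.
ref = NeuralNerwork(n_features=4, n_targets=4).cuda().eval()
opt = NeuralNerworkNew(n_features=4, n_targets=4).cuda().eval()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ref(x), opt(x), atol=1e-05))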
ScModel
import torch
import torch as t
import torch.nn as nn
from torch.nn.parameter import Parameter


class ScModel(nn.Module):
    """ Model for single cell data """

    def __init__(self, n_genes: 'int', n_celltypes: 'int', device: 't.device'
        ) ->None:
        super().__init__()
        self.K = n_celltypes
        self.G = n_genes
        self.theta = Parameter(t.Tensor(self.G, self.K))
        self.R = t.Tensor(self.G, self.K)
        self.o = Parameter(t.Tensor(self.G, 1))
        nn.init.normal_(self.o, mean=0.0, std=1.0)
        nn.init.normal_(self.theta, mean=0.0, std=1.0)
        self.nb = t.distributions.NegativeBinomial
        self.softpl = nn.functional.softplus
        self.logsig = nn.functional.logsigmoid

    def _llnb(self, x: 't.Tensor', meta: 't.LongTensor', sf: 't.Tensor'
        ) ->t.Tensor:
        """Log Likelihood for NB-model

        Returns the log likelihood for rates and logodds taken
        as a function of the observed counts.

        Assumes that single cell data is negative binomial distributed.

        Returns
        -------
        The log likelihood
        """
        log_unnormalized_prob = sf * self.R[:, meta] * self.logsig(-self.o
            ) + x * self.logsig(self.o)
        log_normalization = -t.lgamma(sf * self.R[:, meta] + x) + t.lgamma(
            1.0 + x) + t.lgamma(sf * self.R[:, meta])
        ll = t.sum(log_unnormalized_prob - log_normalization)
        return ll

    def forward(self, x: 't.Tensor', meta: 't.LongTensor', sf: 't.Tensor',
        **kwargs) ->t.Tensor:
        """Forward pass during optimization"""
        self.R = self.softpl(self.theta)
        self.loss = -self._llnb(x.transpose(1, 0), meta, sf)
        return self.loss

    def __str__(self):
        return 'sc_model'


def get_inputs():
    return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
        torch.int64), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_genes': 4, 'n_celltypes': 4, 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch as t import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_red_fused_add_index_lgamma_log_sigmoid_forward_mul_neg_sub_sum_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp40 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex r0 = rindex % 4 r1 = rindex // 4 % 4 tmp0 = tl.load(in_ptr0 + r3, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + r0, rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr3 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr4 + (r1 + 4 * r0), rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~rmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr2 + (tmp5 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp0 * tmp7 tmp10 = -tmp9 tmp11 = 0.0 tmp12 = triton_helpers.minimum(tmp11, tmp10) tmp13 = tl_math.abs(tmp10) tmp14 = -tmp13 tmp15 = tl_math.exp(tmp14) tmp16 = libdevice.log1p(tmp15) tmp17 = tmp12 - tmp16 tmp18 = tmp8 * tmp17 tmp20 = tmp19.to(tl.float32) tmp21 = triton_helpers.minimum(tmp11, tmp9) tmp22 = tl_math.abs(tmp9) tmp23 = -tmp22 tmp24 = tl_math.exp(tmp23) tmp25 = libdevice.log1p(tmp24) tmp26 = tmp21 - tmp25 tmp27 = tmp20 * tmp26 tmp28 = tmp18 + tmp27 tmp29 = tmp8 + tmp20 tmp30 = libdevice.lgamma(tmp29) tmp31 = -tmp30 tmp32 = 1.0 tmp33 = tmp20 + tmp32 tmp34 = libdevice.lgamma(tmp33) tmp35 = tmp31 + tmp34 tmp36 = libdevice.lgamma(tmp8) tmp37 = tmp35 + tmp36 tmp38 = tmp28 - tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = _tmp40 + tmp39 _tmp40 = tl.where(rmask, tmp41, _tmp40) tmp40 = tl.sum(_tmp40, 1)[:, None] tmp42 = -tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_softplus_0[grid(16)](primals_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_red_fused_add_index_lgamma_log_sigmoid_forward_mul_neg_sub_sum_1[ grid(1)](buf3, primals_4, primals_3, buf0, primals_5, primals_2, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) return (buf3, buf0, primals_1, primals_2, primals_3, primals_4, primals_5, buf0) class ScModelNew(nn.Module): """ Model for single cell data """ def __init__(self, n_genes: 'int', n_celltypes: 'int', device: 't.device' ) ->None: super().__init__() self.K = n_celltypes self.G = n_genes self.theta = Parameter(t.Tensor(self.G, self.K)) self.R = t.Tensor(self.G, self.K) self.o = Parameter(t.Tensor(self.G, 1)) nn.init.normal_(self.o, mean=0.0, std=1.0) nn.init.normal_(self.theta, mean=0.0, std=1.0) self.nb = t.distributions.NegativeBinomial self.softpl = nn.functional.softplus self.logsig = nn.functional.logsigmoid def _llnb(self, x: 't.Tensor', meta: 't.LongTensor', sf: 't.Tensor' ) ->t.Tensor: """Log Likelihood for NB-model Returns the log likelihood for rates and logodds taken as a function of the observed counts. Assumes that single cell data is negative binomial distributed. Returns ------- The log likelihood """ log_unnormalized_prob = sf * self.R[:, meta] * self.logsig(-self.o ) + x * self.logsig(self.o) log_normalization = -t.lgamma(sf * self.R[:, meta] + x) + t.lgamma( 1.0 + x) + t.lgamma(sf * self.R[:, meta]) ll = t.sum(log_unnormalized_prob - log_normalization) return ll def __str__(self): return 'sc_model' def forward(self, input_0, input_1, input_2): primals_1 = self.theta primals_5 = self.o primals_2 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
jfnavarro/stereoscope
ScModel
false
3,715
[ "MIT" ]
0
0a64db45291c3a9b72abdf13183614a10f3dac40
https://github.com/jfnavarro/stereoscope/tree/0a64db45291c3a9b72abdf13183614a10f3dac40
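A minimal usage sketch, assuming ScModelNew from this row is in scope and a CUDA device is available; dtypes and shapes mirror get_inputs()/get_init_inputs() above (integer counts, integer cell-type labels, float size factors), and the unused device argument is passed as 0 exactly as get_init_inputs() does.

import torch

# Hypothetical usage: the forward pass returns the scalar negative log-likelihood loss.
model = ScModelNew(n_genes=4, n_celltypes=4, device=0).cuda()
x = torch.ones(4, 4, dtype=torch.int64, device='cuda')
meta = torch.ones(4, dtype=torch.int64, device='cuda')
sf = torch.rand(4, 4, 4, 4, device='cuda')
loss = model(x, meta, sf)
print(float(loss))  # scalar negative log-likelihood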
GCN
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.parallel


class Conv2D(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, padding=
        'same', stride=1, dilation=1, groups=1):
        super(Conv2D, self).__init__()
        assert type(kernel_size) in [int, tuple
            ], 'Allowed kernel type [int or tuple], not {}'.format(type(
            kernel_size))
        assert padding == 'same', 'Allowed padding type {}, not {}'.format(
            'same', padding)
        self.kernel_size = kernel_size
        if isinstance(kernel_size, tuple):
            self.h_kernel = kernel_size[0]
            self.w_kernel = kernel_size[1]
        else:
            self.h_kernel = kernel_size
            self.w_kernel = kernel_size
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        self.groups = groups
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
            out_channels, kernel_size=kernel_size, stride=self.stride,
            dilation=self.dilation, groups=self.groups)

    def forward(self, x):
        if self.padding == 'same':
            height, width = x.shape[2:]
            h_pad_need = max(0, (height - 1) * self.stride +
                self.h_kernel - height)
            w_pad_need = max(0, (width - 1) * self.stride +
                self.w_kernel - width)
            pad_left = w_pad_need // 2
            pad_right = w_pad_need - pad_left
            pad_top = h_pad_need // 2
            pad_bottom = h_pad_need - pad_top
            padding = pad_left, pad_right, pad_top, pad_bottom
            x = F.pad(x, padding, 'constant', 0)
        x = self.conv(x)
        return x


class GCN(nn.Module):
    """
    Large Kernel Matters -- https://arxiv.org/abs/1703.02719
    """

    def __init__(self, in_channels, out_channels, k=3):
        super(GCN, self).__init__()
        self.conv_l1 = Conv2D(in_channels=in_channels, out_channels=
            out_channels, kernel_size=(k, 1), padding='same')
        self.conv_l2 = Conv2D(in_channels=out_channels, out_channels=
            out_channels, kernel_size=(1, k), padding='same')
        self.conv_r1 = Conv2D(in_channels=in_channels, out_channels=
            out_channels, kernel_size=(1, k), padding='same')
        self.conv_r2 = Conv2D(in_channels=out_channels, out_channels=
            out_channels, kernel_size=(k, 1), padding='same')

    def forward(self, x):
        x1 = self.conv_l1(x)
        x1 = self.conv_l2(x1)
        x2 = self.conv_r1(x)
        x2 = self.conv_r2(x2)
        out = x1 + x2
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.functional as F import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x4 = xindex // 6 x2 = xindex // 24 % 4 x5 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x4 = xindex // 24 x5 = xindex % 24 x2 = xindex // 24 % 4 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x6, tmp10, xmask) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, 
xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(384)](primals_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(384)](buf1, primals_3, buf2, 384, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(384)](primals_1, buf4, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_3[grid(384)](buf5, primals_7, buf6, 384, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_7 buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf3 del buf3 triton_poi_fused_add_convolution_4[grid(256)](buf8, primals_5, buf7, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_5 del primals_9 return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6) class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding= 'same', stride=1, dilation=1, groups=1): super(Conv2D, self).__init__() assert type(kernel_size) in [int, tuple ], 'Allowed kernel type [int or tuple], not {}'.format(type( kernel_size)) assert padding == 'same', 'Allowed padding type {}, not {}'.format( 'same', padding) self.kernel_size = kernel_size if isinstance(kernel_size, tuple): self.h_kernel = kernel_size[0] self.w_kernel = kernel_size[1] else: self.h_kernel = kernel_size self.w_kernel = kernel_size self.padding = padding self.stride = stride self.dilation = dilation self.groups = groups self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups) def forward(self, x): if self.padding == 'same': height, width = x.shape[2:] h_pad_need = max(0, (height - 1) * self.stride + 
self.h_kernel - height) w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width) pad_left = w_pad_need // 2 pad_right = w_pad_need - pad_left pad_top = h_pad_need // 2 pad_bottom = h_pad_need - pad_top padding = pad_left, pad_right, pad_top, pad_bottom x = F.pad(x, padding, 'constant', 0) x = self.conv(x) return x class GCNNew(nn.Module): """ Large Kernel Matters -- https://arxiv.org/abs/1703.02719 """ def __init__(self, in_channels, out_channels, k=3): super(GCNNew, self).__init__() self.conv_l1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') self.conv_l2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') def forward(self, input_0): primals_2 = self.conv_l1.conv.weight primals_3 = self.conv_l1.conv.bias primals_4 = self.conv_l2.conv.weight primals_5 = self.conv_l2.conv.bias primals_6 = self.conv_r1.conv.weight primals_7 = self.conv_r1.conv.bias primals_8 = self.conv_r2.conv.weight primals_9 = self.conv_r2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
hav4ik/AdelaiDet
GCN
false
3,716
[ "BSD-2-Clause" ]
0
6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
https://github.com/hav4ik/AdelaiDet/tree/6ed9c1e1a25a3e25dddfa858ce0f219a30593ce2
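A minimal equivalence-check sketch, assuming GCN and GCNNew from this row are in scope and a CUDA device is available: both build the large-kernel block from two separable (k x 1) / (1 x k) convolution branches, so with shared weights their outputs should agree.

import torch

# Hypothetical check: copy the four branch convolutions' weights, then compare outputs.
ref = GCN(in_channels=4, out_channels=4).cuda()
opt = GCNNew(in_channels=4, out_channels=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(ref(x), opt(x), atol=1e-05))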
maxout
import torch
import torch.nn as nn
import torch.utils.data


class maxout(nn.Module):

    def __init__(self, in_feature, out_feature, pool_size):
        super(maxout, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.pool_size = pool_size
        self.linear = nn.Linear(in_feature, out_feature * pool_size)

    def forward(self, x):
        output = self.linear(x)
        output = output.view(-1, self.out_feature, self.pool_size)
        output = output.max(2)[0]
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_feature': 4, 'out_feature': 4, 'pool_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tmp0 > tmp1 tmp8 = tmp0 == tmp1 tmp9 = tmp0 != tmp0 tmp10 = tmp1 != tmp1 tmp11 = tmp9 > tmp10 tmp12 = tmp7 | tmp11 tmp13 = tmp9 & tmp10 tmp14 = tmp8 | tmp13 tmp15 = tl.full([1], 0, tl.int64) tmp16 = tl.full([1], 1, tl.int64) tmp17 = tmp15 < tmp16 tmp18 = tmp14 & tmp17 tmp19 = tmp12 | tmp18 tmp20 = tl.where(tmp19, tmp0, tmp1) tmp21 = tl.where(tmp19, tmp15, tmp16) tmp22 = tmp20 > tmp3 tmp23 = tmp20 == tmp3 tmp24 = tmp20 != tmp20 tmp25 = tmp3 != tmp3 tmp26 = tmp24 > tmp25 tmp27 = tmp22 | tmp26 tmp28 = tmp24 & tmp25 tmp29 = tmp23 | tmp28 tmp30 = tl.full([1], 2, tl.int64) tmp31 = tmp21 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tmp27 | tmp32 tmp34 = tl.where(tmp33, tmp20, tmp3) tmp35 = tl.where(tmp33, tmp21, tmp30) tmp36 = tmp34 > tmp5 tmp37 = tmp34 == tmp5 tmp38 = tmp34 != tmp34 tmp39 = tmp5 != tmp5 tmp40 = tmp38 > tmp39 tmp41 = tmp36 | tmp40 tmp42 = tmp38 & tmp39 tmp43 = tmp37 | tmp42 tmp44 = tl.full([1], 3, tl.int64) tmp45 = tmp35 < tmp44 tmp46 = tmp43 & tmp45 tmp47 = tmp41 | tmp46 tl.where(tmp47, tmp34, tmp5) tmp49 = tl.where(tmp47, tmp35, tmp44) tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp49, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_max_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0) class maxoutNew(nn.Module): def __init__(self, in_feature, out_feature, pool_size): super(maxoutNew, self).__init__() self.in_feature = in_feature self.out_feature = out_feature self.pool_size = pool_size self.linear = nn.Linear(in_feature, out_feature * pool_size) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias 
primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
jiahuanluo/Global-Encoding
maxout
false
3,717
[ "MIT" ]
0
2adb01def9525588b3a75e6f2a5181a3a11464ed
https://github.com/jiahuanluo/Global-Encoding/tree/2adb01def9525588b3a75e6f2a5181a3a11464ed
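A minimal sketch, assuming maxout and maxoutNew from this row are in scope and a CUDA device is available: the linear layer emits out_feature * pool_size activations, the input is flattened to rows, and the maximum is taken within each pool of size pool_size.

import torch

# Hypothetical check: both versions flatten the (4, 4, 4, 4) input to 64 rows of 4 maxout units.
ref = maxout(in_feature=4, out_feature=4, pool_size=4).cuda()
opt = maxoutNew(in_feature=4, out_feature=4, pool_size=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print(ref(x).shape, torch.allclose(ref(x), opt(x)))  # torch.Size([64, 4]) True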
Gather
import torch
import torch.nn as nn


class Gather(torch.nn.Module):
    """ gather """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        return parser

    def __init__(self, F, K, use_mask=False):
        super().__init__()
        self.K = K
        self.F = F
        self.softmax_2 = nn.Softmax(dim=2)
        self.use_mask = use_mask
        if use_mask:
            self.a = nn.Parameter(torch.randn(F, K), requires_grad=True)
    """image_codeing [N,F,H,W]
    s_att (spatial attention): [N,K,H,W] K-how many segment s_att(0 or 50)
    feature: [N,K,F]
    """

    def forward(self, image_coding, s_att, att_mask=None):
        """
        x: b*m*h*w
        c_att: b*K*h*w
        """
        b, F, h, w = image_coding.size()
        b_, K, h_, w_ = s_att.size()
        assert b == b_ and h == h_ and w == w_ and self.K == K and self.F == F
        if self.use_mask:
            b__, K__ = att_mask.size()
            assert b == b__ and self.K == K__
        s_att_new = self.softmax_2(s_att.view(b, K, h * w)).permute(0, 2, 1)
        gather_result = torch.bmm(image_coding.view(b, F, h * w), s_att_new)
        if self.use_mask:
            att_mask_new = att_mask.view(b__, 1, K__).expand_as(gather_result)
            gather_result = att_mask_new * gather_result + (1 - att_mask_new
                ) * self.a.view(1, F, K).expand_as(gather_result)
        return gather_result


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'F': 4, 'K': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg1_1, buf2, 16, 16, XBLOCK= 8, num_warps=2, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf2, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg0_1 del buf2 return buf3, class GatherNew(torch.nn.Module): """ gather """ @staticmethod def modify_commandline_options(parser, is_train): return parser def __init__(self, F, K, use_mask=False): super().__init__() self.K = K self.F = F self.softmax_2 = nn.Softmax(dim=2) self.use_mask = use_mask if use_mask: self.a = nn.Parameter(torch.randn(F, K), requires_grad=True) """image_codeing [N,F,H,W] s_att (spatial attention): [N,K,H,W] K-how many segment s_att(0 or 50) feature: [N,K,F] """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
jhp038/fashion_project
Gather
false
3,718
[ "MIT" ]
0
719533dc60155801f567e6a9183d7a5036ee1166
https://github.com/jhp038/fashion_project/tree/719533dc60155801f567e6a9183d7a5036ee1166
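A minimal equivalence-check sketch, assuming Gather and GatherNew from this row are in scope and a CUDA device is available: the spatial attention maps are softmax-normalised over H*W and used to pool the feature map into K segment descriptors of size F (the mask-free path, matching get_init_inputs() above).

import torch

# Hypothetical check on the use_mask=False path; shapes follow get_inputs().
ref = Gather(F=4, K=4)
opt = GatherNew(F=4, K=4)
image_coding = torch.rand(4, 4, 4, 4, device='cuda')
s_att = torch.rand(4, 4, 4, 4, device='cuda')
print(ref(image_coding, s_att).shape)  # torch.Size([4, 4, 4]): (batch, F, K)
print(torch.allclose(ref(image_coding, s_att), opt(image_coding, s_att), atol=1e-05))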