Dataset schema (column name, type, value/length statistics):

    entry_point                   string   lengths 1 to 65
    original_triton_python_code   string   lengths 208 to 619k
    optimised_triton_code         string   lengths 1.15k to 275k
    repo_name                     string   lengths 7 to 115
    module_name                   string   lengths 1 to 65
    synthetic                     bool     1 class
    uuid                          int64    values 0 to 18.5k
    licenses                      list     lengths 1 to 6
    stars                         int64    values 0 to 19.8k
    sha                           string   lengths 40 to 40
    repo_link                     string   lengths 72 to 180
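Each record below lists one value per column in the order above; the two code columns hold complete Python sources as strings. A minimal loading sketch follows, assuming the Hugging Face datasets library; the dataset path is a placeholder, not this data's real location.

# Hypothetical loading sketch: "user/pytorch-to-triton-modules" is a placeholder
# path, not this dataset's real location. Field names come from the schema above.
from datasets import load_dataset

ds = load_dataset("user/pytorch-to-triton-modules", split="train")  # placeholder path
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
original_src = row["original_triton_python_code"]  # reference PyTorch module source
optimised_src = row["optimised_triton_code"]       # Inductor-generated Triton version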
entry_point: Baseline
import torch import torch.nn as nn class Baseline(nn.Module): """Baseline """ def __init__(self, hid_dim, x_dim, binary_dim, inp_dim): super(Baseline, self).__init__() self.x_dim = x_dim self.binary_dim = binary_dim self.inp_dim = inp_dim self.hid_dim = hid_dim self.linear1 = nn.Linear(x_dim + self.binary_dim + self.inp_dim, self.hid_dim) self.linear2 = nn.Linear(self.hid_dim, 1) def forward(self, x, binary, inp): """Estimate agent's loss based on the agent's input. Args: x: Image features. binary: Communication message. inp: Hidden state (used when agent is the Receiver). Output: score: An estimate of the agent's loss. """ features = [] if x is not None: features.append(x) if binary is not None: features.append(binary) if inp is not None: features.append(inp) features = torch.cat(features, 1) hidden = self.linear1(features).clamp(min=0) pred_score = self.linear2(hidden) return pred_score def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'hid_dim': 4, 'x_dim': 4, 'binary_dim': 4, 'inp_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_clamp_ge_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_ge_1[grid(16)](buf1, primals_5, buf2, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_5 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, buf0, buf2, primals_6, buf5 class BaselineNew(nn.Module): """Baseline """ def __init__(self, hid_dim, x_dim, binary_dim, inp_dim): super(BaselineNew, self).__init__() self.x_dim = x_dim self.binary_dim = binary_dim self.inp_dim = inp_dim self.hid_dim = hid_dim self.linear1 = nn.Linear(x_dim + 
self.binary_dim + self.inp_dim, self.hid_dim) self.linear2 = nn.Linear(self.hid_dim, 1) def forward(self, input_0, input_1, input_2): primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear2.weight primals_7 = self.linear2.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
repo_name: nyu-dl/MultimodalGame
module_name: Baseline
synthetic: false
uuid: 16,199
licenses: [ "BSD-3-Clause" ]
stars: 54
sha: 0782a7bf3cf5125cd7c35a243e97f0e9e016fca3
repo_link: https://github.com/nyu-dl/MultimodalGame/tree/0782a7bf3cf5125cd7c35a243e97f0e9e016fca3
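For rows like the Baseline record above, a minimal sketch of how the reference module and the generated module could be checked against each other on the row's own sample inputs. Assumptions: a CUDA device is available (the generated call() pins device 0); Baseline, BaselineNew, get_inputs and get_init_inputs from the two code fields are in scope; the tolerances are arbitrary.

# Illustrative equivalence check, not part of the dataset row.
import torch

init_args, init_kwargs = get_init_inputs()           # ([], {'hid_dim': 4, 'x_dim': 4, ...})
ref = Baseline(*init_args, **init_kwargs).cuda()     # eager reference module
gen = BaselineNew(*init_args, **init_kwargs).cuda()  # module wrapping the generated call()
gen.load_state_dict(ref.state_dict())                # share weights so outputs are comparable
inputs = [t.cuda() for t in get_inputs()]            # three (4, 4) tensors: x, binary, inp
torch.testing.assert_close(ref(*inputs), gen(*inputs), rtol=1e-4, atol=1e-4)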
entry_point: TVLoss
import torch from torch import nn from torch.nn import functional as F class TVLoss(nn.Module): """L2 total variation loss, as in Mahendran et al.""" def forward(self, input): input = F.pad(input, (0, 1, 0, 1), 'replicate') x_diff = input[..., :-1, 1:] - input[..., :-1, :-1] y_diff = input[..., 1:, :-1] - input[..., :-1, :-1] return (x_diff ** 2 + y_diff ** 2).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 16 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None) tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 16 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None) tmp4 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 + r1 < 3)) + 16 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 - tmp1 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_pow_sub_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class TVLossNew(nn.Module): """L2 total variation loss, as in Mahendran et al.""" def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: olaviinha/style-transfer-pytorch
module_name: TVLoss
synthetic: false
uuid: 16,200
licenses: [ "MIT" ]
stars: 290
sha: 9bdb2d932a31b6cf0ac7b651dc38b740c3e37fe8
repo_link: https://github.com/olaviinha/style-transfer-pytorch/tree/9bdb2d932a31b6cf0ac7b651dc38b740c3e37fe8
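For the TVLoss record above, a rough timing sketch comparing the eager module with the Inductor-generated one, using triton.testing.do_bench. Assumptions: a CUDA device, the TVLoss and TVLossNew classes from the two code fields in scope, and the row's 4x4x4x4 sample shape; the numbers it prints are illustrative only.

# Illustrative benchmark, not part of the dataset row.
import torch
from triton.testing import do_bench

x = torch.rand(4, 4, 4, 4, device="cuda")
eager = TVLoss().cuda()         # reference: F.pad + squared finite differences
generated = TVLossNew().cuda()  # Inductor-generated fused reduction kernel
ms_eager = do_bench(lambda: eager(x))
ms_generated = do_bench(lambda: generated(x))
print(f"eager: {ms_eager:.4f} ms, generated: {ms_generated:.4f} ms")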
entry_point: Conv3dMaxPool
import torch from torch import nn class Conv3dMaxPool(nn.Module): def __init__(self, out_channels: 'int', in_channels: 'int'): super().__init__() self.sat_conv3d = nn.Conv3d(in_channels=in_channels, out_channels= out_channels, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.sat_maxpool = nn.MaxPool3d(3, stride=(1, 2, 2), padding=(1, 1, 1)) def forward(self, x): x = self.sat_conv3d(x) return self.sat_maxpool(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_channels': 4, 'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = torch.ops.aten.max_pool3d_with_indices.default( reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), [3, 3, 3], [1, 2, 2], [1, 1, 1]) buf3 = buf2[0] buf4 = buf2[1] del buf2 return buf3, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4 class Conv3dMaxPoolNew(nn.Module): def __init__(self, out_channels: 'int', in_channels: 'int'): super().__init__() self.sat_conv3d = nn.Conv3d(in_channels=in_channels, out_channels= out_channels, kernel_size=(3, 3, 3), padding=(1, 1, 1)) self.sat_maxpool = nn.MaxPool3d(3, stride=(1, 2, 2), padding=(1, 1, 1)) def forward(self, input_0): primals_1 = self.sat_conv3d.weight primals_2 = self.sat_conv3d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: openclimatefix/predict_pv_yield
module_name: Conv3dMaxPool
synthetic: false
uuid: 16,201
licenses: [ "MIT" ]
stars: 47
sha: 83f27bd392190f1771221e92bfebb879bf562f5d
repo_link: https://github.com/openclimatefix/predict_pv_yield/tree/83f27bd392190f1771221e92bfebb879bf562f5d
entry_point: Conv2d
import torch import torch.utils.data import torch.nn as nn class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.relu = nn.LeakyReLU(0.1, inplace=True) if relu else None def forward(self, x): x = self.conv(x) if self.relu is not None: x = self.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_0[grid(16) ](buf1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class Conv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False): super(Conv2dNew, self).__init__() padding = int((kernel_size - 1) / 2) if same_padding else 0 self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.relu = nn.LeakyReLU(0.1, inplace=True) if relu else None def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: ojasjoshi/Selective_Deblur_GANs
module_name: Conv2d
synthetic: false
uuid: 16,202
licenses: [ "MIT" ]
stars: 1,663
sha: 9ac256b41b62c50c8b967f7e6fa7ecb4c7305889
repo_link: https://github.com/ojasjoshi/Selective_Deblur_GANs/tree/9ac256b41b62c50c8b967f7e6fa7ecb4c7305889
entry_point: PartitionLoss
import torch import torch.nn as nn class PartitionLoss(nn.Module): def __init__(self): super(PartitionLoss, self).__init__() def forward(self, x): num_head = x.size(1) if num_head > 1: var = x.var(dim=1).mean() loss = torch.log(1 + num_head / var) else: loss = 0 return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_reciprocal_var_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = 64.0 tmp26 = tmp24 / tmp25 tmp27 = tl.full([1, 1], 1, tl.int32) tmp28 = tmp27 / tmp26 tmp29 = tmp28 * tmp7 tmp30 = 1.0 tmp31 = tmp29 + tmp30 tmp32 = tl_math.log(tmp31) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_mul_reciprocal_var_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class PartitionLossNew(nn.Module): def __init__(self): super(PartitionLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: orena1/DAN
module_name: PartitionLoss
synthetic: false
uuid: 16,203
licenses: [ "MIT" ]
stars: 50
sha: 49247ad0cad2a67057d184fa92d15fe2e7bb2cb6
repo_link: https://github.com/orena1/DAN/tree/49247ad0cad2a67057d184fa92d15fe2e7bb2cb6
entry_point: ActivationLoss
import torch import torch.utils.data from torch import nn class ActivationLoss(nn.Module): def __init__(self): super(ActivationLoss, self).__init__() def forward(self, zero, one, labels): loss_act = torch.abs(one - labels.data) + torch.abs(zero - (1.0 - labels.data)) return 1 / labels.shape[0] * loss_act.sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tmp4 - tmp6 tmp8 = tl_math.abs(tmp7) tmp9 = tmp3 + tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 0.25 tmp14 = tmp12 * tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mul_rsub_sub_sum_0[grid(1)](buf1, arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class ActivationLossNew(nn.Module): def __init__(self): super(ActivationLossNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
repo_name: nviable/ClassNSeg
module_name: ActivationLoss
synthetic: false
uuid: 16,204
licenses: [ "BSD-3-Clause" ]
stars: 68
sha: 87e506fddb9f36ef14f9bd1f6496f86d7faef0fd
repo_link: https://github.com/nviable/ClassNSeg/tree/87e506fddb9f36ef14f9bd1f6496f86d7faef0fd
entry_point: IPDFeature
import math import torch import torch as th import torch.nn as nn class IPDFeature(nn.Module): """ Compute inter-channel phase difference """ def __init__(self, ipd_index='1,0;2,0;3,0;4,0;5,0;6,0', cos=True, sin=False ): super(IPDFeature, self).__init__() def split_index(sstr): return [tuple(map(int, p.split(','))) for p in sstr.split(';')] pair = split_index(ipd_index) self.index_l = [t[0] for t in pair] self.index_r = [t[1] for t in pair] self.ipd_index = ipd_index self.cos = cos self.sin = sin self.num_pairs = len(pair) * 2 if cos and sin else len(pair) def extra_repr(self): return f'ipd_index={self.ipd_index}, cos={self.cos}, sin={self.sin}' def forward(self, p): """ Accept multi-channel phase and output inter-channel phase difference args p: phase matrix, N x C x F x T return ipd: N x MF x T """ if p.dim() not in [3, 4]: raise RuntimeError('{} expect 3/4D tensor, but got {:d} instead' .format(self.__name__, p.dim())) if p.dim() == 3: p = p.unsqueeze(0) N, _, _, T = p.shape pha_dif = p[:, self.index_l] - p[:, self.index_r] if self.cos: ipd = th.cos(pha_dif) if self.sin: ipd = th.cat([ipd, th.sin(pha_dif)], 2) else: ipd = th.fmod(pha_dif, 2 * math.pi) - math.pi ipd = ipd.view(N, -1, T) return ipd def get_inputs(): return [torch.rand([7, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cos_index_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.where(tmp6, tmp5, tmp1) tmp8 = tl.where(tmp4, tmp3, tmp7) tmp9 = tl.full([1], 4, tl.int64) tmp10 = tmp0 < tmp9 tmp11 = tl.full([1], 5, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.full([1], 6, tl.int64) tmp14 = tl.where(tmp12, tmp11, tmp13) tmp15 = tl.where(tmp10, tmp9, tmp14) tmp16 = tl.where(tmp2, tmp8, tmp15) tmp17 = tl.load(in_ptr0 + (x0 + 16 * tmp16), xmask) tmp18 = tl.full([1], 0, tl.int64) tmp19 = tl.where(tmp6, tmp18, tmp18) tmp20 = tl.where(tmp4, tmp18, tmp19) tmp21 = tl.where(tmp12, tmp18, tmp18) tmp22 = tl.where(tmp10, tmp18, tmp21) tmp23 = tl.where(tmp2, tmp20, tmp22) tmp24 = tl.load(in_ptr0 + (x0 + 16 * tmp23), xmask) tmp25 = tmp17 - tmp24 tmp26 = tl_math.cos(tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (7, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cos_index_sub_0[grid(96)](arg0_1, buf0, 96, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (1, 24, 4), (96, 4, 1), 0), class IPDFeatureNew(nn.Module): """ Compute inter-channel phase difference """ def __init__(self, ipd_index='1,0;2,0;3,0;4,0;5,0;6,0', cos=True, sin=False ): super(IPDFeatureNew, self).__init__() def split_index(sstr): return [tuple(map(int, p.split(','))) for p in sstr.split(';')] pair = split_index(ipd_index) self.index_l = [t[0] for t in pair] self.index_r = [t[1] for t in pair] self.ipd_index = ipd_index self.cos = cos self.sin = sin self.num_pairs = len(pair) * 2 if cos and sin else len(pair) def extra_repr(self): return f'ipd_index={self.ipd_index}, cos={self.cos}, sin={self.sin}' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: oucxlw/ConferencingSpeech2021
module_name: IPDFeature
synthetic: false
uuid: 16,205
licenses: [ "Apache-2.0" ]
stars: 98
sha: 617df8116c0510b2addadb1de374d7b50eea4f2b
repo_link: https://github.com/oucxlw/ConferencingSpeech2021/tree/617df8116c0510b2addadb1de374d7b50eea4f2b
entry_point: MDNLayer
import torch from torch import nn from torch.nn import functional as F class MDNLayer(nn.Module): """ Mixture Density Network layer The input maps to the parameters of a Mixture of Gaussians (MoG) probability distribution, where each Gaussian has out_dim dimensions and diagonal covariance. If dim_wise is True, features for each dimension are modeld by independent 1-D GMMs instead of modeling jointly. This would workaround training difficulty especially for high dimensional data. Implementation references: 1. Mixture Density Networks by Mike Dusenberry https://mikedusenberry.com/mixture-density-networks 2. PRML book https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/ 3. sagelywizard/pytorch-mdn https://github.com/sagelywizard/pytorch-mdn 4. sksq96/pytorch-mdn https://github.com/sksq96/pytorch-mdn Attributes: in_dim (int): the number of dimensions in the input out_dim (int): the number of dimensions in the output num_gaussians (int): the number of mixture component dim_wise (bool): whether to model data for each dimension seperately """ def __init__(self, in_dim, out_dim, num_gaussians=30, dim_wise=False): super(MDNLayer, self).__init__() self.in_dim = in_dim self.out_dim = out_dim self.num_gaussians = num_gaussians self.dim_wise = dim_wise odim_log_pi = out_dim * num_gaussians if dim_wise else num_gaussians self.log_pi = nn.Linear(in_dim, odim_log_pi) self.log_sigma = nn.Linear(in_dim, out_dim * num_gaussians) self.mu = nn.Linear(in_dim, out_dim * num_gaussians) def forward(self, minibatch): """Forward for MDN Args: minibatch (torch.Tensor): tensor of shape (B, T, D_in) B is the batch size and T is data lengths of this batch, and D_in is in_dim. Returns: torch.Tensor: Tensor of shape (B, T, G) or (B, T, G, D_out) Log of mixture weights. G is num_gaussians and D_out is out_dim. torch.Tensor: Tensor of shape (B, T, G, D_out) the log of standard deviation of each Gaussians. torch.Tensor: Tensor of shape (B, T, G, D_out) mean of each Gaussians """ B = len(minibatch) if self.dim_wise: log_pi = self.log_pi(minibatch).view(B, -1, self.num_gaussians, self.out_dim) log_pi = F.log_softmax(log_pi, dim=2) else: log_pi = F.log_softmax(self.log_pi(minibatch), dim=2) log_sigma = self.log_sigma(minibatch) log_sigma = log_sigma.view(B, -1, self.num_gaussians, self.out_dim) mu = self.mu(minibatch) mu = mu.view(B, -1, self.num_gaussians, self.out_dim) return log_pi, log_sigma, mu def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 30 x2 = xindex // 120 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (30 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (60 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (90 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 30 x2 = xindex // 120 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (30 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (60 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (90 + x0 + 120 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (30, 4), (4, 1)) assert_size_stride(primals_3, (30,), (1,)) assert_size_stride(primals_4, (120, 4), (4, 1)) assert_size_stride(primals_5, (120,), (1,)) assert_size_stride(primals_6, (120, 4), (4, 1)) assert_size_stride(primals_7, (120,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 30), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(1920)](buf0, buf1, 1920, XBLOCK=128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(1920)](buf1, buf2, 1920, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 120), (120, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 120), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_4 del primals_5 buf4 = empty_strided_cuda((64, 120), (120, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 120), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 del primals_7 return buf2, reinterpret_tensor(buf3, (4, 16, 30, 4), (1920, 120, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 16, 30, 4), (1920, 120, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2 class MDNLayerNew(nn.Module): """ Mixture Density Network layer The input maps to the parameters of a Mixture of Gaussians (MoG) probability distribution, where each Gaussian has out_dim dimensions and diagonal covariance. If dim_wise is True, features for each dimension are modeld by independent 1-D GMMs instead of modeling jointly. This would workaround training difficulty especially for high dimensional data. Implementation references: 1. Mixture Density Networks by Mike Dusenberry https://mikedusenberry.com/mixture-density-networks 2. PRML book https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/ 3. sagelywizard/pytorch-mdn https://github.com/sagelywizard/pytorch-mdn 4. sksq96/pytorch-mdn https://github.com/sksq96/pytorch-mdn Attributes: in_dim (int): the number of dimensions in the input out_dim (int): the number of dimensions in the output num_gaussians (int): the number of mixture component dim_wise (bool): whether to model data for each dimension seperately """ def __init__(self, in_dim, out_dim, num_gaussians=30, dim_wise=False): super(MDNLayerNew, self).__init__() self.in_dim = in_dim self.out_dim = out_dim self.num_gaussians = num_gaussians self.dim_wise = dim_wise odim_log_pi = out_dim * num_gaussians if dim_wise else num_gaussians self.log_pi = nn.Linear(in_dim, odim_log_pi) self.log_sigma = nn.Linear(in_dim, out_dim * num_gaussians) self.mu = nn.Linear(in_dim, out_dim * num_gaussians) def forward(self, input_0): primals_2 = self.log_pi.weight primals_3 = self.log_pi.bias primals_4 = self.log_sigma.weight primals_5 = self.log_sigma.bias primals_6 = self.mu.weight primals_7 = self.mu.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1], output[2]
repo_name: oatsu-gh/nnsvs
module_name: MDNLayer
synthetic: false
uuid: 16,206
licenses: [ "MIT" ]
stars: 298
sha: 510f37bc1d1f15282646e4d34435b5d63686cf40
repo_link: https://github.com/oatsu-gh/nnsvs/tree/510f37bc1d1f15282646e4d34435b5d63686cf40
entry_point: L1Norm
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn.init class L1Norm(nn.Module): def __init__(self): super(L1Norm, self).__init__() self.eps = 1e-10 def forward(self, x): norm = torch.sum(torch.abs(x), dim=1) + self.eps x = x / norm.expand_as(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.abs(tmp1) tmp4 = tl_math.abs(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.abs(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.abs(tmp9) tmp11 = tmp8 + tmp10 tmp12 = 1e-10 tmp13 = tmp11 + tmp12 tmp14 = tmp0 / tmp13 tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class L1NormNew(nn.Module): def __init__(self): super(L1NormNew, self).__init__() self.eps = 1e-10 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: oskyhn/CNNs-Without-Borders
module_name: L1Norm
synthetic: false
uuid: 16,207
licenses: [ "BSD-3-Clause" ]
stars: 74
sha: 4fae1d8fd64c3c917f5c78c3513a60572af961b1
repo_link: https://github.com/oskyhn/CNNs-Without-Borders/tree/4fae1d8fd64c3c917f5c78c3513a60572af961b1
entry_point: TimeIntervalTransformerLayer
import torch import numpy as np import torch.nn as nn import torch.distributions class TimeIntervalMultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads, kq_same=False, bias=True): super().__init__() """ It also needs position and interaction (time interval) key/value input. """ self.d_model = d_model self.h = n_heads self.d_k = self.d_model // self.h self.kq_same = kq_same self.v_linear = nn.Linear(d_model, d_model, bias=bias) self.k_linear = nn.Linear(d_model, d_model, bias=bias) if not kq_same: self.q_linear = nn.Linear(d_model, d_model, bias=bias) def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask): bs, seq_len = k.size(0), k.size(1) k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k) if not self.kq_same: q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k) else: q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k) v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k) k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k) inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k) inter_k = inter_k.transpose(2, 3).transpose(1, 2) inter_v = inter_v.transpose(2, 3).transpose(1, 2) output = self.scaled_dot_product_attention(q, k, v, inter_k, inter_v, self.d_k, mask) output = output.transpose(1, 2).reshape(bs, -1, self.d_model) return output @staticmethod def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask): """ Involve pair interaction embeddings when calculating attention scores and output """ scores = torch.matmul(q, k.transpose(-2, -1)) scores += (q[:, :, :, None, :] * inter_k).sum(-1) scores = scores / d_k ** 0.5 scores.masked_fill_(mask == 0, -np.inf) scores = (scores - scores.max()).softmax(dim=-1) output = torch.matmul(scores, v) output += (scores[:, :, :, :, None] * inter_v).sum(-2) return output class TimeIntervalTransformerLayer(nn.Module): def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False): super().__init__() self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model, n_heads, kq_same=kq_same) self.layer_norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, d_ff) self.linear2 = nn.Linear(d_ff, d_model) self.layer_norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, seq, pos_k, pos_v, inter_k, inter_v, mask): context = self.masked_attn_head(seq, seq, seq, pos_k, pos_v, inter_k, inter_v, mask) context = self.layer_norm1(self.dropout1(context) + seq) output = self.linear1(context).relu() output = self.linear2(output) output = self.layer_norm2(self.dropout2(output) + context) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4, 'n_heads': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex y0 = yindex % 4 y5 = yindex x3 = xindex // 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * y0), xmask & ymask, eviction_policy ='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x4 + 16 * y5), xmask & ymask) tmp2 = tl.load(in_ptr1 + (y0 + 4 * x3 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (y0 + 4 * x4 + 64 * y1), xmask & ymask) tmp4 = tmp2 + tmp3 tmp6 = tmp4 * tmp5 tmp7 = tmp1 + tmp6 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = float('-inf') tmp11 = tl.where(tmp0, tmp10, tmp9) tl.store(in_out_ptr0 + (x4 + 16 * y5), tmp11, xmask & ymask) @triton.jit def triton_per_fused_max_4(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_poi_fused__softmax_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 - tmp2 tmp5 = tmp4 - tmp2 tmp6 = triton_helpers.maximum(tmp3, tmp5) tmp8 = tmp7 - tmp2 tmp9 = triton_helpers.maximum(tmp6, tmp8) tmp11 = tmp10 - tmp2 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp3 - tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = tmp5 - tmp12 tmp16 = tl_math.exp(tmp15) tmp17 = tmp14 + tmp16 tmp18 = tmp8 - tmp12 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp11 - tmp12 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tl.store(out_ptr0 + x0, tmp12, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp0 - tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 == tmp2 tmp10 = libdevice.isnan(tmp0).to(tl.int1) tmp11 = libdevice.isnan(tmp2).to(tl.int1) tmp12 = tmp10 & tmp11 tmp13 = tmp9 | tmp12 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_mul_sum_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, 
XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (4 + y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (8 + y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (12 + y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp6 = tmp4 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp8 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp12 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = tmp0 + tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1)) assert_size_stride(primals_11, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1)) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_6, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf0, 
primals_3, primals_4, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 del primals_4 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_eq_2[grid(64)](primals_12, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_div_masked_fill_3[grid(16, 16)](buf7, buf6, buf1, primals_6, primals_10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps= 4, num_stages=1) del primals_6 buf8 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_4[grid(1)](buf7, buf8, 1, 256, num_warps=2, num_stages=1) buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf10 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf0 triton_poi_fused__softmax_sub_5[grid(64)](buf7, buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6[grid (256)](buf7, buf8, buf9, buf10, buf11, buf27, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 del buf8 buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf9 triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, primals_9, buf12, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 del primals_9 buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf13 triton_poi_fused_add_mul_sum_7[grid(16, 4)](buf14, buf11, primals_11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf15 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0) del buf10 triton_poi_fused_add_native_layer_norm_8[grid(16, 4)](buf14, primals_1, buf15, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_9[grid(16)](buf15, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 triton_poi_fused_native_layer_norm_10[grid(64)](buf15, buf16, buf17, primals_13, primals_14, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_11[grid(64)](buf20, primals_16, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf21) buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0) del buf21 triton_poi_fused_add_12[grid(64)](buf22, primals_18, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 buf23 
= buf17 del buf17 buf24 = buf16 del buf16 triton_poi_fused_native_layer_norm_9[grid(16)](buf22, buf23, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(64)](buf22, buf23, buf24, primals_19, primals_20, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf23 del buf24 del primals_20 return (buf25, primals_10, primals_11, primals_13, primals_19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6, buf11, buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf22, primals_17, buf26, primals_15, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), buf27, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class TimeIntervalMultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads, kq_same=False, bias=True): super().__init__() """ It also needs position and interaction (time interval) key/value input. """ self.d_model = d_model self.h = n_heads self.d_k = self.d_model // self.h self.kq_same = kq_same self.v_linear = nn.Linear(d_model, d_model, bias=bias) self.k_linear = nn.Linear(d_model, d_model, bias=bias) if not kq_same: self.q_linear = nn.Linear(d_model, d_model, bias=bias) def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask): bs, seq_len = k.size(0), k.size(1) k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k) if not self.kq_same: q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k) else: q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k) v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k) k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k) inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k) inter_k = inter_k.transpose(2, 3).transpose(1, 2) inter_v = inter_v.transpose(2, 3).transpose(1, 2) output = self.scaled_dot_product_attention(q, k, v, inter_k, inter_v, self.d_k, mask) output = output.transpose(1, 2).reshape(bs, -1, self.d_model) return output @staticmethod def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask): """ Involve pair interaction embeddings when calculating attention scores and output """ scores = torch.matmul(q, k.transpose(-2, -1)) scores += (q[:, :, :, None, :] * inter_k).sum(-1) scores = scores / d_k ** 0.5 scores.masked_fill_(mask == 0, -np.inf) scores = (scores - scores.max()).softmax(dim=-1) output = torch.matmul(scores, v) output += (scores[:, :, :, :, None] * inter_v).sum(-2) return output class TimeIntervalTransformerLayerNew(nn.Module): def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False): super().__init__() self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model, n_heads, kq_same=kq_same) self.layer_norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, d_ff) self.linear2 = nn.Linear(d_ff, d_model) self.layer_norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, input_0, input_1, input_2, input_3, input_4, input_5): primals_2 = self.masked_attn_head.v_linear.weight primals_3 = self.masked_attn_head.v_linear.bias primals_5 = self.masked_attn_head.k_linear.weight primals_6 = self.masked_attn_head.k_linear.bias primals_7 = self.masked_attn_head.q_linear.weight primals_8 = self.masked_attn_head.q_linear.bias primals_13 = self.layer_norm1.weight primals_14 = self.layer_norm1.bias primals_15 = 
self.linear1.weight primals_16 = self.linear1.bias primals_17 = self.linear2.weight primals_18 = self.linear2.bias primals_19 = self.layer_norm2.weight primals_20 = self.layer_norm2.bias primals_1 = input_0 primals_4 = input_1 primals_9 = input_2 primals_10 = input_3 primals_11 = input_4 primals_12 = input_5 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0]
nmrenyi/ReChorus
TimeIntervalTransformerLayer
false
16208
[ "MIT" ]
314
9ab632579d0464b0aaf365539f87b04866920b66
https://github.com/nmrenyi/ReChorus/tree/9ab632579d0464b0aaf365539f87b04866920b66
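For reference, the record above fuses the attention of TimeIntervalMultiHeadAttention.scaled_dot_product_attention into Triton kernels. The snippet below is a minimal plain-PyTorch sketch of that same computation (a pairwise interval-key term added to q·kᵀ, scaling by sqrt(d_k), masking, a max-subtracted softmax, then an interval-value term added to the weighted sum). The function name and tensor shapes are illustrative only and are not taken from the dataset.

# Minimal reference sketch of the interval-aware attention fused above; illustrative shapes.
import torch

def interval_attention(q, k, v, inter_k, inter_v, mask):
    # q, k, v: [batch, heads, seq, d_k]
    # inter_k, inter_v: [batch, heads, seq, seq, d_k] pairwise interval embeddings
    # mask: broadcastable to [batch, heads, seq, seq]; 0 marks blocked positions
    d_k = q.size(-1)
    scores = torch.matmul(q, k.transpose(-2, -1))                # [b, h, s, s]
    scores = scores + (q[:, :, :, None, :] * inter_k).sum(-1)    # pairwise interval-key term
    scores = scores / d_k ** 0.5
    scores = scores.masked_fill(mask == 0, float('-inf'))
    scores = (scores - scores.max()).softmax(dim=-1)             # max-subtracted softmax
    out = torch.matmul(scores, v)                                # [b, h, s, d_k]
    out = out + (scores[:, :, :, :, None] * inter_v).sum(-2)     # pairwise interval-value term
    return out

b, h, s, d = 2, 2, 4, 8
q, k, v = (torch.randn(b, h, s, d) for _ in range(3))
inter_k = torch.randn(b, h, s, s, d)
inter_v = torch.randn(b, h, s, s, d)
mask = torch.tril(torch.ones(s, s))          # causal mask, 0 = blocked
print(interval_attention(q, k, v, inter_k, inter_v, mask).shape)  # torch.Size([2, 2, 4, 8])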
SpatialSoftmax
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter class SpatialSoftmax(nn.Module): def __init__(self, temperature=1, device='cpu'): super(SpatialSoftmax, self).__init__() if temperature: self.temperature = Parameter(torch.ones(1) * temperature) else: self.temperature = 1.0 def forward(self, feature): feature = feature.view(feature.shape[0], -1, feature.shape[1] * feature.shape[2]) softmax_attention = F.softmax(feature / self.temperature, dim=-1) return softmax_attention def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__softmax_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3 = 0.0 tmp4 = tmp2 >= tmp3 tmp5 = 1.0 tmp6 = -1.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, float('-inf')) tmp12 = triton_helpers.max2(tmp11, 1)[:, None] tmp13 = tmp8 - tmp12 tmp14 = tmp7 * tmp2 tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp16 / tmp20 tl.store(out_ptr2 + (r1 + 16 * x0), tmp21, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](primals_1, primals_2, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) return buf2, primals_1, primals_2, buf2 class SpatialSoftmaxNew(nn.Module): def __init__(self, temperature=1, device='cpu'): super(SpatialSoftmaxNew, self).__init__() if temperature: self.temperature = Parameter(torch.ones(1) * temperature) else: self.temperature = 1.0 def forward(self, input_0): primals_2 = self.temperature primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ozcell/ENet-SAD_Pytorch
SpatialSoftmax
false
16209
[ "MIT" ]
53
aaa79b5e96316e1bf24d3c2147ee622d4f17bc24
https://github.com/ozcell/ENet-SAD_Pytorch/tree/aaa79b5e96316e1bf24d3c2147ee622d4f17bc24
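As a reference for the SpatialSoftmax record above, the fused kernel appears to reproduce a temperature-scaled softmax over the flattened last axis (it multiplies by the sign of the temperature before the max-subtraction, so the result matches F.softmax(x / t, dim=-1) for either sign of t). Below is a minimal sketch of that reference computation, using the input shape from the record's get_inputs(); variable names are illustrative.

# Reference sketch: temperature-scaled softmax over the flattened axis, as in the record above.
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)          # same shape as the record's get_inputs()
t = torch.ones(1)                   # temperature parameter, initialised to 1 in the module

flat = x.view(x.shape[0], -1, x.shape[1] * x.shape[2])    # module flattens to [4, 4, 16]
ref = F.softmax(flat / t, dim=-1)
print(ref.shape)                                          # torch.Size([4, 4, 16])
print(torch.allclose(ref.sum(-1), torch.ones(4, 4)))      # True: each row sums to 1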
GoodDiscriminator
import torch from torch import nn class MyConvo2d(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True, stride=1, bias=True): super(MyConvo2d, self).__init__() self.he_init = he_init self.padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=self.padding, bias=bias) def forward(self, input): output = self.conv(input) return output class ConvMeanPool(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True): super(ConvMeanPool, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init) def forward(self, input): output = self.conv(input) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True): super(MeanPoolConv, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class DepthToSpace(nn.Module): def __init__(self, block_size): super(DepthToSpace, self).__init__() self.block_size = block_size self.block_size_sq = block_size * block_size def forward(self, input): output = input.permute(0, 2, 3, 1) batch_size, input_height, input_width, input_depth = output.size() output_depth = int(input_depth / self.block_size_sq) output_width = int(input_width * self.block_size) output_height = int(input_height * self.block_size) t_1 = output.reshape(batch_size, input_height, input_width, self. 
block_size_sq, output_depth) spl = t_1.split(self.block_size, 3) stacks = [t_t.reshape(batch_size, input_height, output_width, output_depth) for t_t in spl] output = torch.stack(stacks, 0).transpose(0, 1).permute(0, 2, 1, 3, 4 ).reshape(batch_size, output_height, output_width, output_depth) output = output.permute(0, 3, 1, 2) return output class UpSampleConv(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True, bias=True): super(UpSampleConv, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init, bias=bias) self.depth_to_space = DepthToSpace(2) def forward(self, input): output = input output = torch.cat((output, output, output, output), 1) output = self.depth_to_space(output) output = self.conv(output) return output class ResidualBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64 ): super(ResidualBlock, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.kernel_size = kernel_size self.resample = resample self.bn1 = None self.bn2 = None self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() if resample == 'down': self.bn1 = nn.LayerNorm([input_dim, hw, hw]) self.bn2 = nn.LayerNorm([input_dim, hw, hw]) elif resample == 'up': self.bn1 = nn.BatchNorm2d(input_dim) self.bn2 = nn.BatchNorm2d(output_dim) elif resample is None: self.bn1 = nn.BatchNorm2d(output_dim) self.bn2 = nn.LayerNorm([input_dim, hw, hw]) else: raise Exception('invalid resample value') if resample == 'down': self.conv_shortcut = MeanPoolConv(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = MyConvo2d(input_dim, input_dim, kernel_size= kernel_size, bias=False) self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size= kernel_size) elif resample == 'up': self.conv_shortcut = UpSampleConv(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size= kernel_size, bias=False) self.conv_2 = MyConvo2d(output_dim, output_dim, kernel_size= kernel_size) elif resample is None: self.conv_shortcut = MyConvo2d(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = MyConvo2d(input_dim, input_dim, kernel_size= kernel_size, bias=False) self.conv_2 = MyConvo2d(input_dim, output_dim, kernel_size= kernel_size) else: raise Exception('invalid resample value') def forward(self, input): if self.input_dim == self.output_dim and self.resample is None: shortcut = input else: shortcut = self.conv_shortcut(input) output = input output = self.bn1(output) output = self.relu1(output) output = self.conv_1(output) output = self.bn2(output) output = self.relu2(output) output = self.conv_2(output) return shortcut + output class GoodDiscriminator(nn.Module): def __init__(self, dim=64): super(GoodDiscriminator, self).__init__() self.dim = dim self.ssize = self.dim // 16 self.conv1 = MyConvo2d(3, self.dim, 3, he_init=False) self.rb1 = ResidualBlock(self.dim, 2 * self.dim, 3, resample='down', hw=self.dim) self.rb2 = ResidualBlock(2 * self.dim, 4 * self.dim, 3, resample= 'down', hw=int(self.dim / 2)) self.rb3 = ResidualBlock(4 * self.dim, 8 * self.dim, 3, resample= 'down', hw=int(self.dim / 4)) self.rb4 = ResidualBlock(8 * self.dim, 8 * self.dim, 3, resample= 'down', hw=int(self.dim / 8)) self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.dim, 1) def forward(self, input): output = input.contiguous() output = output.view(-1, 3, self.dim, self.dim) output = self.conv1(output) output = self.rb1(output) output = self.rb2(output) 
output = self.rb3(output) output = self.rb4(output) output = output.view(-1, self.ssize * self.ssize * 8 * self.dim) output = self.ln1(output) output = output.view(-1) return output def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
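Before the fused kernels of this record, a small reference sketch: the four strided slices that ConvMeanPool and MeanPoolConv average (which seems to appear as the 0.25 factor in the fused add/div kernels below) give the same result as a 2x2 average pool when the spatial dimensions are even. The shapes used here are illustrative only.

# Sketch: the strided-slice average used by ConvMeanPool / MeanPoolConv equals a 2x2 avg pool.
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)
manual = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2]
          + x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4
pooled = F.avg_pool2d(x, kernel_size=2)
print(torch.allclose(manual, pooled))   # True (up to float rounding)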
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_view_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_add_div_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_per_fused_native_layer_norm_11(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 32 x1 = xindex // 32 % 64 x2 = xindex // 2048 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * (r3 % 64) + 4096 * ((r3 + 128 * x1) // 64 % 64) + 262144 * x2 + (r3 + 128 * x1) // 4096), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 128, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) tl.store(out_ptr2 + x4, tmp7, None) @triton.jit def triton_per_fused_native_layer_norm_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 128 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 32 x1 = xindex // 32 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, 
tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_layer_norm_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 32 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 32 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 262144.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_layer_norm_relu_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 262144 * y1), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 4096 * y0), ymask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 4096 * y0), ymask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1, 1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp10, ymask) @triton.jit def triton_poi_fused_add_convolution_div_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x1 = xindex // 128 % 32 x2 = xindex // 4096 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 256 * x1 + 16384 * x2), None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (8192 + x0 + 256 * x1 + 16384 * x2), None) tmp9 = tl.load(in_ptr1 + (128 + x0 + 256 * x1 + 16384 * x2), None) tmp12 = tl.load(in_ptr1 + (8320 + x0 + 256 * x1 + 16384 * x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tl.store(in_out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_div_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_per_fused_native_layer_norm_17(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 16 x1 = xindex // 16 % 64 x2 = xindex // 1024 x4 = xindex tmp0 = tl.load(in_ptr0 + (8 * x0 + 128 * (r3 % 32) + 4096 * ((r3 + 128 * x1) // 32 % 32) + 131072 * x2 + (r3 + 128 * x1) // 1024), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 128, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) tl.store(out_ptr2 + x4, tmp7, None) @triton.jit def triton_per_fused_native_layer_norm_18(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_layer_norm_native_layer_norm_backward_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, 
tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 131072.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = 7.62939453125e-06 tmp22 = tmp20 * tmp21 tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_layer_norm_20(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 131072 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 131072.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tl.store(in_out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_native_layer_norm_relu_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 131072 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp6, xmask & ymask) @triton.jit def triton_per_fused_native_layer_norm_22(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 16 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 131072.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_layer_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 
+ (y0 + 128 * x2 + 131072 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y1, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 1024 * y0), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1, 1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (y0 + 128 * x2 + 131072 * y1), tmp10, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_div_24(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x1 = xindex // 256 % 16 x2 = xindex // 4096 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 512 * x1 + 16384 * x2), None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (8192 + x0 + 512 * x1 + 16384 * x2), None) tmp9 = tl.load(in_ptr1 + (256 + x0 + 512 * x1 + 16384 * x2), None) tmp12 = tl.load(in_ptr1 + (8448 + x0 + 512 * x1 + 16384 * x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tl.store(in_out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_div_25(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_per_fused_native_layer_norm_26(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 8 x1 = xindex // 8 % 64 x2 = xindex // 512 x4 = xindex tmp0 = tl.load(in_ptr0 + (32 * x0 + 256 * (r3 % 16) + 4096 * ((r3 + 128 * x1) // 16 % 16) + 65536 * x2 + (r3 + 128 * x1) // 256), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 128, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) tl.store(out_ptr2 + x4, tmp7, None) @triton.jit def triton_per_fused_native_layer_norm_27(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 8 x1 = xindex // 8 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 8 * r2 + 512 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_layer_norm_native_layer_norm_backward_28(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 65536.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = 1.52587890625e-05 tmp22 = tmp20 * tmp21 tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_layer_norm_29(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 65536 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 65536.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tl.store(in_out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_native_layer_norm_relu_30(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 256 * y0), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 256 * y0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = 
triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp6, xmask) @triton.jit def triton_per_fused_native_layer_norm_31(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 8 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 8 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 65536.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_layer_norm_relu_32(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 256 * y0), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 256 * y0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1, 1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (y0 + 256 * x2 + 65536 * y1), tmp10, xmask) @triton.jit def triton_poi_fused_add_convolution_div_33(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x1 = xindex // 512 % 8 x2 = xindex // 4096 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 16384 * x2), None) tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (8192 + x0 + 1024 * x1 + 16384 * x2), None) tmp9 = tl.load(in_ptr1 + (512 + x0 + 1024 * x1 + 16384 * x2), None) tmp12 = tl.load(in_ptr1 + (8704 + x0 + 1024 * x1 + 16384 * x2), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tl.store(in_out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_div_34(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 512 x1 = xindex // 512 
% 4 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (4096 + x0 + 1024 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4608 + x0 + 1024 * x1 + 8192 * x2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_per_fused_native_layer_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 1024 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 256 x1 = xindex // 256 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * (r2 % 64) + 32768 * x1 + r2 // 64), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tl.store(out_ptr0 + x3, tmp10, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) tl.store(out_ptr2 + x3, tmp9, xmask) @triton.jit def triton_per_fused_native_layer_norm_36(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_native_layer_norm_native_layer_norm_backward_37(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 
32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tmp21 = 3.0517578125e-05 tmp22 = tmp20 * tmp21 tl.store(out_ptr2 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_layer_norm_38(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 32768 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 32768.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tl.store(in_out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_native_layer_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 64 * y0), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 64 * y0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1, 1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp6, xmask) @triton.jit def triton_per_fused_native_layer_norm_40(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_native_layer_norm_relu_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y1, None, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2 + 64 * y0), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 64 * y0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1, 1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (y0 + 512 * x2 + 32768 * y1), tmp10, xmask) @triton.jit def triton_poi_fused_add_convolution_div_42(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 64 xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y4 = yindex y0 = yindex % 4 y5 = yindex // 4 y2 = yindex // 16 y6 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x3 + 512 * y4), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x3 + 1024 * y0 + 8192 * y5), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (4096 + x3 + 1024 * y0 + 8192 * y5), xmask & ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (512 + x3 + 1024 * y0 + 8192 * y5), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (4608 + x3 + 1024 * y0 + 8192 * y5), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp6 + tmp4 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp12 + tmp4 tmp14 = tmp11 + tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tmp2 + tmp16 tl.store(out_ptr0 + (y6 + 16 * x3 + 8192 * y2), tmp17, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (128, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 64, 64), (4096, 64, 1)) assert_size_stride(primals_7, (64, 64, 64), (4096, 64, 1)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, 64, 64), (4096, 64, 1)) assert_size_stride(primals_10, (64, 64, 64), (4096, 64, 1)) assert_size_stride(primals_11, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (256, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_14, (256,), (1,)) assert_size_stride(primals_15, (128, 32, 32), (1024, 32, 1)) assert_size_stride(primals_16, (128, 32, 32), (1024, 32, 1)) assert_size_stride(primals_17, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_18, (128, 32, 32), (1024, 32, 1)) assert_size_stride(primals_19, (128, 32, 32), (1024, 32, 1)) assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, 
(256,), (1,)) assert_size_stride(primals_22, (512, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (256, 16, 16), (256, 16, 1)) assert_size_stride(primals_25, (256, 16, 16), (256, 16, 1)) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256, 16, 16), (256, 16, 1)) assert_size_stride(primals_28, (256, 16, 16), (256, 16, 1)) assert_size_stride(primals_29, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (512,), (1,)) assert_size_stride(primals_31, (512, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_32, (512,), (1,)) assert_size_stride(primals_33, (512, 8, 8), (64, 8, 1)) assert_size_stride(primals_34, (512, 8, 8), (64, 8, 1)) assert_size_stride(primals_35, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_36, (512, 8, 8), (64, 8, 1)) assert_size_stride(primals_37, (512, 8, 8), (64, 8, 1)) assert_size_stride(primals_38, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_39, (512,), (1,)) assert_size_stride(primals_40, (1, 8192), (8192, 1)) assert_size_stride(primals_41, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_2, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_1[grid(4096, 9)](primals_8, buf1, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_11, buf2, 8192, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_17, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_17 buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(32768, 9)](primals_20, buf4, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_5[grid(65536, 9)](primals_26, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf6 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(131072, 9)](primals_29, buf6, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_29 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_7[grid(262144, 9)](primals_35, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_35 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_7[grid(262144, 9)](primals_38, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_38 buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_view_8[grid(12, 4096)](primals_1, buf9, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf10 = extern_kernels.convolution(buf9, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf11 = buf10 del buf10 
triton_poi_fused_convolution_9[grid(1048576)](buf11, primals_3, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) triton_poi_fused_add_div_10[grid(262144)](buf11, buf12, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf14 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192, 8192, 1, 32), torch.float32) buf15 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192, 8192, 1, 32), torch.float32) buf16 = empty_strided_cuda((4, 1, 1, 1, 32, 64), (2048, 8192, 8192, 8192, 1, 32), torch.float32) triton_per_fused_native_layer_norm_11[grid(8192)](buf11, buf14, buf15, buf16, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1) buf17 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 1, 1, 32), (32, 128, 128, 128, 1), torch.float32) triton_per_fused_native_layer_norm_12[grid(128)](buf14, buf15, buf16, buf17, buf18, buf19, 128, 64, XBLOCK=1, num_warps=2, num_stages=1) buf20 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf23 = reinterpret_tensor(buf21, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf21 triton_per_fused_native_layer_norm_13[grid(4)](buf23, buf17, buf18, buf19, buf20, 4, 32, XBLOCK=1, num_warps=2, num_stages=1) buf24 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf11, buf20, buf23, primals_6, primals_7, buf24, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_7 buf25 = extern_kernels.convolution(buf24, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf26 = buf16 del buf16 buf27 = buf15 del buf15 buf28 = buf14 del buf14 triton_per_fused_native_layer_norm_11[grid(8192)](buf25, buf26, buf27, buf28, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1) buf29 = buf19 del buf19 buf30 = buf18 del buf18 buf31 = buf17 del buf17 triton_per_fused_native_layer_norm_12[grid(128)](buf26, buf27, buf28, buf29, buf30, buf31, 128, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf26 del buf27 del buf28 buf32 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf35 = reinterpret_tensor(buf33, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf33 triton_per_fused_native_layer_norm_13[grid(4)](buf35, buf29, buf30, buf31, buf32, 4, 32, XBLOCK=1, num_warps=2, num_stages=1) del buf29 del buf30 del buf31 buf36 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_native_layer_norm_relu_14[grid(256, 4096)](buf25, buf32, buf35, primals_9, primals_10, buf36, 256, 4096, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_10 buf37 = extern_kernels.convolution(buf36, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 128, 64, 64), (524288, 1, 8192, 128)) buf38 = buf13 del buf13 
triton_poi_fused_add_convolution_div_15[grid(524288)](buf38, primals_5, buf37, primals_12, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf37 del primals_12 del primals_5 buf39 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) triton_poi_fused_add_div_16[grid(131072)](buf38, buf39, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf40 = extern_kernels.convolution(buf39, primals_13, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf41 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096, 4096, 1, 16), torch.float32) buf42 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096, 4096, 1, 16), torch.float32) buf43 = empty_strided_cuda((4, 1, 1, 1, 16, 64), (1024, 4096, 4096, 4096, 1, 16), torch.float32) triton_per_fused_native_layer_norm_17[grid(4096)](buf38, buf41, buf42, buf43, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf44 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 1, 1, 16), (16, 64, 64, 64, 1), torch.float32) triton_per_fused_native_layer_norm_18[grid(64)](buf41, buf42, buf43, buf44, buf45, buf46, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) buf47 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf48 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf124 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_per_fused_native_layer_norm_native_layer_norm_backward_19[grid (4)](buf44, buf45, buf46, buf47, buf48, buf124, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf50 = buf38 del buf38 triton_poi_fused_native_layer_norm_20[grid(524288)](buf50, buf47, buf48, 524288, XBLOCK=1024, num_warps=4, num_stages=1) buf51 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_layer_norm_relu_21[grid(512, 1024)](buf50, primals_15, primals_16, buf51, 512, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_16 buf52 = extern_kernels.convolution(buf51, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf53 = buf43 del buf43 buf54 = buf42 del buf42 buf55 = buf41 del buf41 triton_per_fused_native_layer_norm_17[grid(4096)](buf52, buf53, buf54, buf55, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf56 = buf46 del buf46 buf57 = buf45 del buf45 buf58 = buf44 del buf44 triton_per_fused_native_layer_norm_18[grid(64)](buf53, buf54, buf55, buf56, buf57, buf58, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf53 del buf54 del buf55 buf59 = reinterpret_tensor(buf48, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf48 buf60 = buf47 del buf47 buf62 = reinterpret_tensor(buf60, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf60 triton_per_fused_native_layer_norm_22[grid(4)](buf62, buf56, buf57, buf58, buf59, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf56 del buf57 del buf58 buf63 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_layer_norm_relu_23[grid(512, 1024)](buf52, buf59, buf62, primals_18, primals_19, buf63, 512, 1024, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_19 buf64 = extern_kernels.convolution(buf63, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf65 = buf40 del buf40 triton_poi_fused_add_convolution_div_24[grid(262144)](buf65, primals_14, buf64, primals_21, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf64 del primals_14 del primals_21 buf66 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) triton_poi_fused_add_div_25[grid(65536)](buf65, buf66, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf67 = extern_kernels.convolution(buf66, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf68 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048, 2048, 1, 8), torch.float32) buf69 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048, 2048, 1, 8), torch.float32) buf70 = empty_strided_cuda((4, 1, 1, 1, 8, 64), (512, 2048, 2048, 2048, 1, 8), torch.float32) triton_per_fused_native_layer_norm_26[grid(2048)](buf65, buf68, buf69, buf70, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1) buf71 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 1, 1, 8), (8, 32, 32, 32, 1), torch.float32) triton_per_fused_native_layer_norm_27[grid(32)](buf68, buf69, buf70, buf71, buf72, buf73, 32, 64, XBLOCK=1, num_warps=2, num_stages=1) buf74 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf75 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf123 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_per_fused_native_layer_norm_native_layer_norm_backward_28[grid (4)](buf71, buf72, buf73, buf74, buf75, buf123, 4, 8, XBLOCK=1, num_warps=2, num_stages=1) buf77 = buf65 del buf65 triton_poi_fused_native_layer_norm_29[grid(262144)](buf77, buf74, buf75, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf78 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_layer_norm_relu_30[grid(1024, 256)](buf77, primals_24, primals_25, buf78, 1024, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_25 buf79 = extern_kernels.convolution(buf78, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf79, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf80 = buf70 del buf70 buf81 = buf69 del buf69 buf82 = buf68 del buf68 triton_per_fused_native_layer_norm_26[grid(2048)](buf79, buf80, buf81, buf82, 2048, 128, XBLOCK=32, num_warps=8, num_stages=1) buf83 = buf73 del buf73 buf84 = buf72 del buf72 buf85 = buf71 del buf71 triton_per_fused_native_layer_norm_27[grid(32)](buf80, buf81, buf82, buf83, buf84, buf85, 32, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf80 del buf81 del buf82 buf86 = reinterpret_tensor(buf75, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf75 buf87 = buf74 del buf74 buf89 = reinterpret_tensor(buf87, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf87 triton_per_fused_native_layer_norm_31[grid(4)](buf89, buf83, buf84, buf85, buf86, 4, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf83 del buf84 del buf85 buf90 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_layer_norm_relu_32[grid(1024, 256)](buf79, buf86, buf89, primals_27, primals_28, buf90, 1024, 256, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_28 buf91 = 
extern_kernels.convolution(buf90, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf91, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf92 = buf67 del buf67 triton_poi_fused_add_convolution_div_33[grid(131072)](buf92, primals_23, buf91, primals_30, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf91 del primals_23 del primals_30 buf93 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) triton_poi_fused_add_div_34[grid(32768)](buf92, buf93, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf94 = extern_kernels.convolution(buf93, primals_31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf94, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf95 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024, 1024, 64, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024, 1024, 64, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 1, 1, 4, 64), (256, 1024, 1024, 1024, 64, 1), torch.float32) triton_per_fused_native_layer_norm_35[grid(1024)](buf92, buf95, buf96, buf97, 1024, 128, XBLOCK=32, num_warps=8, num_stages=1) buf98 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 1, 1, 4), (4, 16, 16, 16, 1), torch.float32) triton_per_fused_native_layer_norm_36[grid(16)](buf95, buf96, buf97, buf98, buf99, buf100, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf101 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf102 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf122 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_per_fused_native_layer_norm_native_layer_norm_backward_37[grid (4)](buf98, buf99, buf100, buf101, buf102, buf122, 4, 4, XBLOCK =1, num_warps=2, num_stages=1) buf104 = buf92 del buf92 triton_poi_fused_native_layer_norm_38[grid(131072)](buf104, buf101, buf102, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf105 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_layer_norm_relu_39[grid(2048, 64)](buf104, primals_33, primals_34, buf105, 2048, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_34 buf106 = extern_kernels.convolution(buf105, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf106, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf107 = buf97 del buf97 buf108 = buf96 del buf96 buf109 = buf95 del buf95 triton_per_fused_native_layer_norm_35[grid(1024)](buf106, buf107, buf108, buf109, 1024, 128, XBLOCK=32, num_warps=8, num_stages=1) buf110 = buf99 del buf99 buf111 = buf98 del buf98 buf112 = buf100 del buf100 triton_per_fused_native_layer_norm_36[grid(16)](buf107, buf108, buf109, buf110, buf111, buf112, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) del buf107 del buf108 del buf109 buf113 = reinterpret_tensor(buf102, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf102 buf114 = buf101 del buf101 buf116 = reinterpret_tensor(buf114, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf114 triton_per_fused_native_layer_norm_40[grid(4)](buf116, buf110, buf111, buf112, buf113, 4, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf110 del buf111 del buf112 buf117 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) 
triton_poi_fused_native_layer_norm_relu_41[grid(2048, 64)](buf106, buf113, buf116, primals_36, primals_37, buf117, 2048, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_37 buf118 = extern_kernels.convolution(buf117, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf118, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf119 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch .float32) triton_poi_fused_add_convolution_div_42[grid(64, 512)](buf94, primals_32, buf118, primals_39, buf119, 64, 512, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1) del buf118 del buf94 del primals_32 del primals_39 buf121 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_41, reinterpret_tensor(buf119, (4, 8192), (8192, 1), 0), reinterpret_tensor(primals_40, (8192, 1), (1, 8192), 0), alpha=1, beta=1, out=buf121) del primals_41 return (reinterpret_tensor(buf121, (4,), (1,), 0), buf0, primals_4, primals_6, buf1, primals_9, buf2, primals_13, primals_15, buf3, primals_18, buf4, primals_22, primals_24, buf5, primals_27, buf6, primals_31, primals_33, buf7, primals_36, buf8, buf9, buf11, buf12, buf20, buf23, buf24, buf25, buf32, buf35, buf36, buf39, buf50, buf51, buf52, buf59, buf62, buf63, buf66, buf77, buf78, buf79, buf86, buf89, buf90, buf93, buf104, buf105, buf106, buf113, buf116, buf117, reinterpret_tensor(buf119, (4, 8192), (8192, 1), 0), primals_40, buf122, buf123, buf124) class MyConvo2d(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True, stride=1, bias=True): super(MyConvo2d, self).__init__() self.he_init = he_init self.padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=self.padding, bias=bias) def forward(self, input): output = self.conv(input) return output class ConvMeanPool(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True): super(ConvMeanPool, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init) def forward(self, input): output = self.conv(input) output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 return output class MeanPoolConv(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True): super(MeanPoolConv, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init) def forward(self, input): output = input output = (output[:, :, ::2, ::2] + output[:, :, 1::2, ::2] + output [:, :, ::2, 1::2] + output[:, :, 1::2, 1::2]) / 4 output = self.conv(output) return output class DepthToSpace(nn.Module): def __init__(self, block_size): super(DepthToSpace, self).__init__() self.block_size = block_size self.block_size_sq = block_size * block_size def forward(self, input): output = input.permute(0, 2, 3, 1) batch_size, input_height, input_width, input_depth = output.size() output_depth = int(input_depth / self.block_size_sq) output_width = int(input_width * self.block_size) output_height = int(input_height * self.block_size) t_1 = output.reshape(batch_size, input_height, input_width, self. 
block_size_sq, output_depth) spl = t_1.split(self.block_size, 3) stacks = [t_t.reshape(batch_size, input_height, output_width, output_depth) for t_t in spl] output = torch.stack(stacks, 0).transpose(0, 1).permute(0, 2, 1, 3, 4 ).reshape(batch_size, output_height, output_width, output_depth) output = output.permute(0, 3, 1, 2) return output class UpSampleConv(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, he_init=True, bias=True): super(UpSampleConv, self).__init__() self.he_init = he_init self.conv = MyConvo2d(input_dim, output_dim, kernel_size, he_init= self.he_init, bias=bias) self.depth_to_space = DepthToSpace(2) def forward(self, input): output = input output = torch.cat((output, output, output, output), 1) output = self.depth_to_space(output) output = self.conv(output) return output class ResidualBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, resample=None, hw=64 ): super(ResidualBlock, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.kernel_size = kernel_size self.resample = resample self.bn1 = None self.bn2 = None self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() if resample == 'down': self.bn1 = nn.LayerNorm([input_dim, hw, hw]) self.bn2 = nn.LayerNorm([input_dim, hw, hw]) elif resample == 'up': self.bn1 = nn.BatchNorm2d(input_dim) self.bn2 = nn.BatchNorm2d(output_dim) elif resample is None: self.bn1 = nn.BatchNorm2d(output_dim) self.bn2 = nn.LayerNorm([input_dim, hw, hw]) else: raise Exception('invalid resample value') if resample == 'down': self.conv_shortcut = MeanPoolConv(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = MyConvo2d(input_dim, input_dim, kernel_size= kernel_size, bias=False) self.conv_2 = ConvMeanPool(input_dim, output_dim, kernel_size= kernel_size) elif resample == 'up': self.conv_shortcut = UpSampleConv(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = UpSampleConv(input_dim, output_dim, kernel_size= kernel_size, bias=False) self.conv_2 = MyConvo2d(output_dim, output_dim, kernel_size= kernel_size) elif resample is None: self.conv_shortcut = MyConvo2d(input_dim, output_dim, kernel_size=1, he_init=False) self.conv_1 = MyConvo2d(input_dim, input_dim, kernel_size= kernel_size, bias=False) self.conv_2 = MyConvo2d(input_dim, output_dim, kernel_size= kernel_size) else: raise Exception('invalid resample value') def forward(self, input): if self.input_dim == self.output_dim and self.resample is None: shortcut = input else: shortcut = self.conv_shortcut(input) output = input output = self.bn1(output) output = self.relu1(output) output = self.conv_1(output) output = self.bn2(output) output = self.relu2(output) output = self.conv_2(output) return shortcut + output class GoodDiscriminatorNew(nn.Module): def __init__(self, dim=64): super(GoodDiscriminatorNew, self).__init__() self.dim = dim self.ssize = self.dim // 16 self.conv1 = MyConvo2d(3, self.dim, 3, he_init=False) self.rb1 = ResidualBlock(self.dim, 2 * self.dim, 3, resample='down', hw=self.dim) self.rb2 = ResidualBlock(2 * self.dim, 4 * self.dim, 3, resample= 'down', hw=int(self.dim / 2)) self.rb3 = ResidualBlock(4 * self.dim, 8 * self.dim, 3, resample= 'down', hw=int(self.dim / 4)) self.rb4 = ResidualBlock(8 * self.dim, 8 * self.dim, 3, resample= 'down', hw=int(self.dim / 8)) self.ln1 = nn.Linear(self.ssize * self.ssize * 8 * self.dim, 1) def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_6 = self.rb1.bn1.weight primals_7 = self.rb1.bn1.bias primals_9 = 
self.rb1.bn2.weight primals_10 = self.rb1.bn2.bias primals_4 = self.rb1.conv_shortcut.conv.conv.weight primals_5 = self.rb1.conv_shortcut.conv.conv.bias primals_8 = self.rb1.conv_1.conv.weight primals_11 = self.rb1.conv_2.conv.conv.weight primals_12 = self.rb1.conv_2.conv.conv.bias primals_15 = self.rb2.bn1.weight primals_16 = self.rb2.bn1.bias primals_18 = self.rb2.bn2.weight primals_19 = self.rb2.bn2.bias primals_13 = self.rb2.conv_shortcut.conv.conv.weight primals_14 = self.rb2.conv_shortcut.conv.conv.bias primals_17 = self.rb2.conv_1.conv.weight primals_20 = self.rb2.conv_2.conv.conv.weight primals_21 = self.rb2.conv_2.conv.conv.bias primals_24 = self.rb3.bn1.weight primals_25 = self.rb3.bn1.bias primals_27 = self.rb3.bn2.weight primals_28 = self.rb3.bn2.bias primals_22 = self.rb3.conv_shortcut.conv.conv.weight primals_23 = self.rb3.conv_shortcut.conv.conv.bias primals_26 = self.rb3.conv_1.conv.weight primals_29 = self.rb3.conv_2.conv.conv.weight primals_30 = self.rb3.conv_2.conv.conv.bias primals_33 = self.rb4.bn1.weight primals_34 = self.rb4.bn1.bias primals_36 = self.rb4.bn2.weight primals_37 = self.rb4.bn2.bias primals_31 = self.rb4.conv_shortcut.conv.conv.weight primals_32 = self.rb4.conv_shortcut.conv.conv.bias primals_35 = self.rb4.conv_1.conv.weight primals_38 = self.rb4.conv_2.conv.conv.weight primals_39 = self.rb4.conv_2.conv.conv.bias primals_40 = self.ln1.weight primals_41 = self.ln1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41]) return output[0]
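The ConvMeanPool and MeanPoolConv helpers above downsample by averaging the four corners of every 2x2 window through strided slicing. A minimal sketch (illustrative only, not part of the dataset entry) showing that this is the same as a standard 2x2 average pool on even-sized feature maps:

import torch
import torch.nn.functional as F

# The strided-slice average used by ConvMeanPool / MeanPoolConv equals
# F.avg_pool2d with a 2x2 kernel and stride 2 when H and W are even.
x = torch.rand(4, 8, 64, 64)
mean_pool = (x[:, :, ::2, ::2] + x[:, :, 1::2, ::2]
             + x[:, :, ::2, 1::2] + x[:, :, 1::2, 1::2]) / 4
assert torch.allclose(mean_pool, F.avg_pool2d(x, kernel_size=2, stride=2), atol=1e-6)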
justaboutlola/improved-wgan-pytorch
GoodDiscriminator
false
16210
[ "MIT" ]
412
5bb0b729809152d9129ef72a9dd28b3ff83021a2
https://github.com/justaboutlola/improved-wgan-pytorch/tree/5bb0b729809152d9129ef72a9dd28b3ff83021a2
DQNLoss
import torch import numpy as np import torch.nn.functional as F from torch.nn.modules.loss import _Loss class DQNLoss(_Loss): def __init__(self, mode='huber', size_average=None, reduce=None, reduction='mean'): super().__init__(size_average, reduce, reduction) self.mode = mode self.loss = {'huber': F.smooth_l1_loss, 'mse': F.mse_loss}[mode] def forward(self, nn_outputs, actions, target_outputs): target = nn_outputs.clone().detach() target[np.arange(target.shape[0]), actions] = target_outputs return self.loss(nn_outputs, target, reduction=self.reduction) def get_inputs(): return [torch.rand([4, 4]), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
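A hypothetical usage sketch for the DQNLoss class above, with shapes chosen for a typical DQN setting rather than taken from get_inputs(): per-sample Q-values, the actions actually taken, and the bootstrapped TD targets. Because the target is a detached copy of the Q-values with only the taken-action entries overwritten, only those entries contribute to the Huber loss.

import torch

q_values = torch.rand(8, 4, requires_grad=True)   # (batch, num_actions)
actions = torch.randint(0, 4, (8,))               # action taken in each transition
td_targets = torch.rand(8)                        # e.g. r + gamma * max_a Q_target(s', a)

loss_fn = DQNLoss(mode='huber')
loss = loss_fn(q_values, actions, td_targets)
loss.backward()                                   # target is detached, so gradients only reach q_values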
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_put_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_put_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tl.store(out_ptr0 + (tmp4 + 4 * x0), tmp6, xmask) @triton.jit def triton_per_fused_smooth_l1_loss_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = 16.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_index_put_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) triton_poi_fused_index_put_1[grid(16)](arg1_1, arg2_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 del arg2_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_smooth_l1_loss_2[grid(1)](buf3, arg0_1, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf3, class DQNLossNew(_Loss): def __init__(self, mode='huber', size_average=None, reduce=None, reduction='mean'): super().__init__(size_average, reduce, reduction) self.mode = mode self.loss = {'huber': F.smooth_l1_loss, 'mse': F.mse_loss}[mode] def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
opium-sh/prl
DQNLoss
false
16211
[ "MIT" ]
51
3e21f8c7c87cfc7aee84d9e264c3a8b2bc549076
https://github.com/opium-sh/prl/tree/3e21f8c7c87cfc7aee84d9e264c3a8b2bc549076
PositionwiseFeedForwardNet
import torch import torch.nn as nn class PositionwiseFeedForwardNet(nn.Module): """ It's position-wise because this feed forward net will be independently applied to every token's representation. Representations batch is of the shape (batch size, max token sequence length, model dimension). This net will basically be applied independently to every token's representation (you can think of it as if there was a nested for-loop going over the batch size and max token sequence length dimensions and applied this net to token representations. PyTorch does this auto-magically behind the scenes. """ def __init__(self, model_dimension, dropout_probability, width_mult=4): super().__init__() self.linear1 = nn.Linear(model_dimension, width_mult * model_dimension) self.linear2 = nn.Linear(width_mult * model_dimension, model_dimension) self.dropout = nn.Dropout(p=dropout_probability) self.relu = nn.ReLU() def forward(self, representations_batch): return self.linear2(self.dropout(self.relu(self.linear1( representations_batch)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'model_dimension': 4, 'dropout_probability': 0.5}]
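The docstring's position-wise claim is easy to check directly: both linear layers act on the last dimension, so each token position is transformed independently of every other one. A minimal sketch (illustrative only; dropout disabled to keep the comparison deterministic):

import torch

ffn = PositionwiseFeedForwardNet(model_dimension=8, dropout_probability=0.0).eval()
batch = torch.rand(2, 5, 8)                       # (batch, seq_len, model_dim)
out = ffn(batch)
# Feeding a single token on its own gives the same result as the batched pass.
single = ffn(batch[1, 3].unsqueeze(0))
assert torch.allclose(out[1, 3], single.squeeze(0), atol=1e-6)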
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3 class PositionwiseFeedForwardNetNew(nn.Module): """ It's position-wise because this feed forward net will be independently applied to every token's representation. Representations batch is of the shape (batch size, max token sequence length, model dimension). This net will basically be applied independently to every token's representation (you can think of it as if there was a nested for-loop going over the batch size and max token sequence length dimensions and applied this net to token representations. PyTorch does this auto-magically behind the scenes. """ def __init__(self, model_dimension, dropout_probability, width_mult=4): super().__init__() self.linear1 = nn.Linear(model_dimension, width_mult * model_dimension) self.linear2 = nn.Linear(width_mult * model_dimension, model_dimension) self.dropout = nn.Dropout(p=dropout_probability) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
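A hypothetical parity check between the eager module and the Triton-backed PositionwiseFeedForwardNetNew above. This sketch assumes a CUDA device is available, since the generated kernels and empty_strided_cuda buffers are CUDA-only; with dropout a no-op in eval mode, the two should agree on the same input.

import torch

eager = PositionwiseFeedForwardNet(4, 0.5).cuda().eval()
fused = PositionwiseFeedForwardNetNew(4, 0.5).cuda().eval()
fused.load_state_dict(eager.state_dict())         # both expose linear1/linear2, so the weights transfer
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)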
ozzieba/pytorch-original-transformer
PositionwiseFeedForwardNet
false
16212
[ "MIT" ]
654
4c1e17a701fae050e362e962284fb99547636f75
https://github.com/ozzieba/pytorch-original-transformer/tree/4c1e17a701fae050e362e962284fb99547636f75
BertAttention
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertLayerNorm(nn.Module): def __init__(self, config, variance_epsilon=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(config.hidden_size)) self.beta = nn.Parameter(torch.zeros(config.hidden_size)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
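A hypothetical usage sketch for the BertAttention block above. The attention_mask is added to the raw scores before the softmax, so the usual convention is 0.0 for visible positions and a large negative value for padding; dropout probabilities are set to 0.0 here only to keep the example deterministic, and _mock_config is the same helper the entry's get_init_inputs uses.

import torch
from _paritybench_helpers import _mock_config

config = _mock_config(hidden_size=4, num_attention_heads=4,
                      attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0)
attn = BertAttention(config).eval()
hidden = torch.rand(2, 6, 4)                      # (batch, seq_len, hidden_size)
mask = torch.zeros(2, 1, 1, 6)
mask[:, :, :, 4:] = -10000.0                      # hide the last two (padding) positions
out = attn(hidden, mask)                          # output keeps the input shape
assert out.shape == hidden.shape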
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, 
xmask) tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_3, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11, buf13, primals_3, buf14, buf15, primals_12, buf16, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_12 return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9 class BertLayerNorm(nn.Module): def __init__(self, config, variance_epsilon=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(config.hidden_size)) self.beta = nn.Parameter(torch.zeros(config.hidden_size)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.gamma primals_12 = self.output.LayerNorm.beta primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
GingerNg/SDNet
BertAttention
false
16213
[ "MIT" ]
112
48ad8cc57c9a02aaad10e34d0c91a174ac68f056
https://github.com/GingerNg/SDNet/tree/48ad8cc57c9a02aaad10e34d0c91a174ac68f056
PolicyGradientLoss
import torch import torch.nn.functional as F from torch.nn.modules.loss import _Loss class PolicyGradientLoss(_Loss): def __init__(self, size_average=None, reduce=None, reduction='mean'): super().__init__(size_average, reduce, reduction) def forward(self, nn_outputs, actions, returns): output_log_probs = F.log_softmax(nn_outputs, dim=1) log_prob_actions_v = returns * output_log_probs[range(len(actions)), actions] return -log_prob_actions_v.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
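A hypothetical usage sketch for the loss above, with shapes chosen for a plain categorical policy rather than taken from get_inputs(): logits over actions, the sampled actions, and their discounted returns (or advantages). The value minimised is the negative mean of the return-weighted log-probabilities of the taken actions, i.e. the REINFORCE objective.

import torch
import torch.nn.functional as F

logits = torch.rand(6, 3, requires_grad=True)     # (batch, num_actions)
actions = torch.randint(0, 3, (6,))
returns = torch.rand(6)                           # discounted returns (or advantages)

loss_fn = PolicyGradientLoss()
loss = loss_fn(logits, actions, returns)
# Same quantity written out explicitly:
manual = -(returns * F.log_softmax(logits, dim=1)[range(6), actions]).mean()
assert torch.allclose(loss, manual)
loss.backward()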
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp11 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 2, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 0, tl.int64) tmp6 = tl.where(tmp4, tmp5, tmp3) tmp7 = tl.full([1], 3, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.where(tmp8, tmp1, tmp7) tmp10 = tl.where(tmp2, tmp6, tmp9) tmp12 = tl.full([XBLOCK], 4, tl.int32) tmp13 = tmp11 + tmp12 tmp14 = tmp11 < 0 tmp15 = tl.where(tmp14, tmp13, tmp11) tl.device_assert((0 <= tmp15) & (tmp15 < 4) | ~xmask, 'index out of bounds: 0 <= tmp15 < 4') tmp17 = tl.load(in_ptr1 + (x0 + 16 * tmp15 + 64 * tmp10), xmask) tmp18 = tl.load(in_ptr1 + (x0 + 64 * tmp10), xmask) tmp19 = tl_math.exp(tmp18) tmp20 = tl.load(in_ptr1 + (16 + x0 + 64 * tmp10), xmask) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tl.load(in_ptr1 + (32 + x0 + 64 * tmp10), xmask) tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl.load(in_ptr1 + (48 + x0 + 64 * tmp10), xmask) tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_per_fused__log_softmax_index_mean_mul_neg_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = 256.0 tmp7 = tmp5 / tmp6 tmp8 = -tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4,), (1,)) 
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_index_1[grid(64)](arg1_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_index_mean_mul_neg_2[grid(1)](buf3, arg2_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf1 return buf3, class PolicyGradientLossNew(_Loss): def __init__(self, size_average=None, reduce=None, reduction='mean'): super().__init__(size_average, reduce, reduction) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
opium-sh/prl
PolicyGradientLoss
false
16214
[ "MIT" ]
51
3e21f8c7c87cfc7aee84d9e264c3a8b2bc549076
https://github.com/opium-sh/prl/tree/3e21f8c7c87cfc7aee84d9e264c3a8b2bc549076
CorrelationPenaltyLoss
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn.init class CorrelationPenaltyLoss(nn.Module): def __init__(self): super(CorrelationPenaltyLoss, self).__init__() def forward(self, input): mean1 = torch.mean(input, dim=0) zeroed = input - mean1.expand_as(input) cor_mat = torch.bmm(torch.t(zeroed).unsqueeze(0), zeroed.unsqueeze(0) ).squeeze(0) d = torch.diag(torch.diag(cor_mat)) no_diag = cor_mat - d d_sq = no_diag * no_diag return torch.sqrt(d_sq.sum()) / input.size(0) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
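The loss above reads as a decorrelation penalty. A minimal sketch (illustrative only) reproducing it with plain matrix operations: the bmm over unsqueezed tensors is just zeroed.T @ zeroed, and the penalty is the Frobenius norm of that matrix's off-diagonal entries divided by the batch size, so it is driven towards zero when the descriptor dimensions are uncorrelated.

import torch

x = torch.rand(16, 8)                             # (batch, descriptor_dim)
zeroed = x - x.mean(dim=0, keepdim=True)
cov = zeroed.t() @ zeroed                         # unnormalised covariance of the dimensions
off_diag = cov - torch.diag(torch.diag(cov))
manual = off_diag.pow(2).sum().sqrt() / x.size(0)
assert torch.allclose(manual, CorrelationPenaltyLoss()(x), atol=1e-5)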
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_per_fused_diag_embed_div_mul_sqrt_sub_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 4 r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, None) tmp4 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp1 = r0 tmp2 = r1 tmp3 = tmp1 == tmp2 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = libdevice.sqrt(tmp11) tmp13 = 0.25 tmp14 = tmp12 * tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (1, 4, 4), (0, 1, 4), 0 ), reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_diag_embed_div_mul_sqrt_sub_sum_1[grid(1)](buf3, buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf3, class CorrelationPenaltyLossNew(nn.Module): def __init__(self): super(CorrelationPenaltyLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
oskyhn/CNNs-Without-Borders
CorrelationPenaltyLoss
false
16,215
[ "BSD-3-Clause" ]
74
4fae1d8fd64c3c917f5c78c3513a60572af961b1
https://github.com/oskyhn/CNNs-Without-Borders/tree/4fae1d8fd64c3c917f5c78c3513a60572af961b1
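The forward pass of CorrelationPenaltyLoss in the record above reduces to a closed form: with input X of shape N x D and Z = X - mean(X, dim=0),

    C = Z^{\top} Z, \qquad \text{penalty} = \frac{\lVert C - \operatorname{diag}(C) \rVert_F}{N},

i.e. the Frobenius norm of the off-diagonal part of the unnormalised covariance, divided by the batch size. The fused Triton reduction bakes 1/N in as the constant 0.25 because the sample input is 4 x 4.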
mIoULoss
import torch import torch.nn as nn import torch.nn.functional as F class mIoULoss(nn.Module): def __init__(self, weight=None, size_average=True, n_classes=4): super(mIoULoss, self).__init__() self.classes = n_classes def forward(self, inputs, target_oneHot): """ IoU Loss for individual examples inputs - N x {Classes or higher} x H x W target_oneHot - N x {Classes or higher} x H x W BG can be ignored """ N = inputs.size()[0] C = inputs.size()[1] inputs = F.softmax(inputs, dim=1) inter = inputs * target_oneHot inter = inter.view(N, C, -1).sum(2) union = inputs + target_oneHot - inputs * target_oneHot union = union.view(N, C, -1).sum(2) loss = inter / union return -(loss[:, -self.classes].mean() - 1.0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_per_fused__softmax_sum_1(in_ptr0, in_ptr1, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp8 + tmp9 tmp16 = tmp15 - tmp10 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp20, xmask) @triton.jit def triton_per_fused_mean_neg_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 4.0 tmp7 = tmp5 / tmp6 tmp8 = 1.0 tmp9 = tmp7 - tmp8 tmp10 = -tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused__softmax_sum_1[grid(16)](buf0, arg1_1, buf2, buf3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg1_1 del buf0 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_mean_neg_sub_2[grid(1)](buf5, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf3 return buf5, class mIoULossNew(nn.Module): def __init__(self, weight=None, size_average=True, n_classes=4): super(mIoULossNew, self).__init__() self.classes = n_classes def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ozcell/ENet-SAD_Pytorch
mIoULoss
false
16,216
[ "MIT" ]
53
aaa79b5e96316e1bf24d3c2147ee622d4f17bc24
https://github.com/ozcell/ENet-SAD_Pytorch/tree/aaa79b5e96316e1bf24d3c2147ee622d4f17bc24
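Every record in this collection pairs an eager module with a generated *New module whose forward defers to the Triton call(), plus get_init_inputs()/get_inputs() helpers that supply matching constructor arguments and sample tensors. A minimal parity check built on those helpers could look like the sketch below; it assumes a CUDA device (the generated call() requires one), and the name check_parity and the tolerances are illustrative, not part of any record.

    import torch

    def check_parity(eager_cls, fused_cls, get_init_inputs, get_inputs,
                     rtol=1e-5, atol=1e-5):
        # Build both modules from the record's constructor helper:
        # get_init_inputs() returns [positional_args, keyword_args].
        args, kwargs = get_init_inputs()
        eager = eager_cls(*args, **kwargs).cuda().eval()
        fused = fused_cls(*args, **kwargs).cuda().eval()
        # Copy parameters so any difference comes from the kernels alone.
        fused.load_state_dict(eager.state_dict())
        inputs = [t.cuda() for t in get_inputs()]
        with torch.no_grad():
            ref = eager(*inputs)
            out = fused(*inputs)
        # Some records (e.g. the miner and policy modules) return tuples.
        refs = ref if isinstance(ref, tuple) else (ref,)
        outs = out if isinstance(out, tuple) else (out,)
        for r, o in zip(refs, outs):
            torch.testing.assert_close(o, r, rtol=rtol, atol=atol)

For the mIoULoss record above this would be check_parity(mIoULoss, mIoULossNew, get_init_inputs, get_inputs).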
TransformerLayer
import torch import numpy as np import torch.nn as nn import torch.distributions class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads, kq_same=False, bias=True): super().__init__() """ It has projection layer for getting keys, queries and values. Followed by attention. """ self.d_model = d_model self.h = n_heads self.d_k = self.d_model // self.h self.kq_same = kq_same if not kq_same: self.q_linear = nn.Linear(d_model, d_model, bias=bias) self.k_linear = nn.Linear(d_model, d_model, bias=bias) self.v_linear = nn.Linear(d_model, d_model, bias=bias) def head_split(self, x): new_x_shape = x.size()[:-1] + (self.h, self.d_k) return x.view(*new_x_shape).transpose(-2, -3) def forward(self, q, k, v, mask=None): origin_shape = q.size() if not self.kq_same: q = self.head_split(self.q_linear(q)) else: q = self.head_split(self.k_linear(q)) k = self.head_split(self.k_linear(k)) v = self.head_split(self.v_linear(v)) output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask) output = output.transpose(-2, -3).reshape(origin_shape) return output @staticmethod def scaled_dot_product_attention(q, k, v, d_k, mask=None): """ This is called by Multi-head attention object to find the values. """ scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5 if mask is not None: scores = scores.masked_fill(mask == 0, -np.inf) scores = (scores - scores.max()).softmax(dim=-1) scores = scores.masked_fill(torch.isnan(scores), 0) output = torch.matmul(scores, v) return output class TransformerLayer(nn.Module): def __init__(self, d_model, d_ff, n_heads, dropout=0, kq_same=False): super().__init__() """ This is a Basic Block of Transformer. It contains one Multi-head attention object. Followed by layer norm and position wise feedforward net and dropout layer. """ self.masked_attn_head = MultiHeadAttention(d_model, n_heads, kq_same=kq_same) self.layer_norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, d_ff) self.linear2 = nn.Linear(d_ff, d_model) self.layer_norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, seq, mask=None): context = self.masked_attn_head(seq, seq, seq, mask) context = self.layer_norm1(self.dropout1(context) + seq) output = self.linear1(context).relu() output = self.linear2(output) output = self.layer_norm2(self.dropout2(output) + context) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'd_ff': 4, 'n_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp5 = tmp2 - tmp4 tmp7 = tmp6 * tmp1 tmp8 = tmp7 - tmp4 tmp9 = triton_helpers.maximum(tmp5, tmp8) tmp11 = tmp10 * tmp1 tmp12 = tmp11 - tmp4 tmp13 = triton_helpers.maximum(tmp9, tmp12) tmp15 = tmp14 * tmp1 tmp16 = tmp15 - tmp4 tmp17 = triton_helpers.maximum(tmp13, tmp16) tmp18 = tmp5 - tmp17 tmp19 = tl_math.exp(tmp18) tmp20 = tmp8 - tmp17 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp12 - tmp17 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 - tmp17 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tl.store(out_ptr0 + x0, tmp17, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr2 + x1, xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp5 = tmp2 - tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = libdevice.isnan(tmp10).to(tl.int1) tmp12 = 0.0 tmp13 = tl.where(tmp11, tmp12, tmp10) tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, 
xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf0, primals_3, buf3, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_5, buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((), (), torch.float32) triton_per_fused_div_max_1[grid(1)](buf5, buf6, 1, 1024, num_warps= 8, num_stages=1) buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) triton_poi_fused__softmax_div_sub_2[grid(256)](buf5, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_isnan_masked_fill_sub_3[grid(1024)](buf5, buf6, buf7, buf8, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1 ) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(64, 4)](buf2, primals_7, buf10, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 1), 
(16, 4, 1, 64), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](buf11, primals_1, buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused_add_native_layer_norm_5[grid(64, 4)](buf11, primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf15 buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_6[grid(256)](buf16, primals_11, buf22, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf17 triton_poi_fused_add_7[grid(256)](buf18, primals_13, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf19 = buf13 del buf13 buf20 = buf12 del buf12 triton_poi_fused_native_layer_norm_8[grid(64)](buf18, buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(256)](buf18, buf19, buf20, primals_14, primals_15, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf19 del buf20 del primals_15 return (buf21, primals_1, primals_8, primals_14, buf5, buf6, buf11, reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor( buf16, (64, 4), (4, 1), 0), buf18, primals_12, buf22, primals_10, reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0)) class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads, kq_same=False, bias=True): super().__init__() """ It has projection layer for getting keys, queries and values. Followed by attention. """ self.d_model = d_model self.h = n_heads self.d_k = self.d_model // self.h self.kq_same = kq_same if not kq_same: self.q_linear = nn.Linear(d_model, d_model, bias=bias) self.k_linear = nn.Linear(d_model, d_model, bias=bias) self.v_linear = nn.Linear(d_model, d_model, bias=bias) def head_split(self, x): new_x_shape = x.size()[:-1] + (self.h, self.d_k) return x.view(*new_x_shape).transpose(-2, -3) def forward(self, q, k, v, mask=None): origin_shape = q.size() if not self.kq_same: q = self.head_split(self.q_linear(q)) else: q = self.head_split(self.k_linear(q)) k = self.head_split(self.k_linear(k)) v = self.head_split(self.v_linear(v)) output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask) output = output.transpose(-2, -3).reshape(origin_shape) return output @staticmethod def scaled_dot_product_attention(q, k, v, d_k, mask=None): """ This is called by Multi-head attention object to find the values. 
""" scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5 if mask is not None: scores = scores.masked_fill(mask == 0, -np.inf) scores = (scores - scores.max()).softmax(dim=-1) scores = scores.masked_fill(torch.isnan(scores), 0) output = torch.matmul(scores, v) return output class TransformerLayerNew(nn.Module): def __init__(self, d_model, d_ff, n_heads, dropout=0, kq_same=False): super().__init__() """ This is a Basic Block of Transformer. It contains one Multi-head attention object. Followed by layer norm and position wise feedforward net and dropout layer. """ self.masked_attn_head = MultiHeadAttention(d_model, n_heads, kq_same=kq_same) self.layer_norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, d_ff) self.linear2 = nn.Linear(d_ff, d_model) self.layer_norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.masked_attn_head.q_linear.weight primals_3 = self.masked_attn_head.q_linear.bias primals_4 = self.masked_attn_head.k_linear.weight primals_5 = self.masked_attn_head.k_linear.bias primals_6 = self.masked_attn_head.v_linear.weight primals_7 = self.masked_attn_head.v_linear.bias primals_8 = self.layer_norm1.weight primals_9 = self.layer_norm1.bias primals_10 = self.linear1.weight primals_11 = self.linear1.bias primals_12 = self.linear2.weight primals_13 = self.linear2.bias primals_14 = self.layer_norm2.weight primals_15 = self.layer_norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
nmrenyi/ReChorus
TransformerLayer
false
16,217
[ "MIT" ]
314
9ab632579d0464b0aaf365539f87b04866920b66
https://github.com/nmrenyi/ReChorus/tree/9ab632579d0464b0aaf365539f87b04866920b66
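In the TransformerLayer record above, scaled_dot_product_attention computes, in the code's notation,

    S = \frac{Q K^{\top}}{\sqrt{d_k}}, \qquad A = \operatorname{softmax}\bigl(S - \max(S)\bigr), \qquad \text{output} = A V,

with masked positions set to -\infty before the softmax and any NaN entries of A zeroed afterwards. Note that max(S) is the maximum over the whole score tensor rather than a per-row maximum, which is why the generated code contains the single-scalar reduction triton_per_fused_div_max_1 feeding every subsequent softmax tile.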
PositionEmbeddingLayer
import torch import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class PositionEmbeddingLayer(BaseLayer): """ Layer class of Position Embedding Position Embedding was used in Personalized Re-ranking Model :title:`Changhua Pei et al, 2019`[1], which is to add a trainable tensors per position to the session-based embedding features tensor. :Reference: `Changhua Pei et al, 2019. Personalized Re-ranking for Recommendation <https://arxiv.org/abs/1904.06813>`_. """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs': ('B', 'L', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'L', 'E')} def __init__(self, max_num_position: 'int'): """ Initialize PositionEmbedding Args: max_num_position (int): maximum number of position in a sequence """ super().__init__() self.bias = nn.Parameter(torch.Tensor(1, max_num_position, 1)) nn.init.normal_(self.bias) def forward(self, session_embed_inputs: 'torch.Tensor') ->torch.Tensor: """ Forward calculation of PositionEmbedding Args: session_embed_inputs (T), shape = (B, L, E), data_type = torch.float: embedded feature tensors of session Returns: T, shape = (B, L, E), data_type = torch.float: output of PositionEmbedding """ return session_embed_inputs + self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'max_num_position': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class PositionEmbeddingLayerNew(BaseLayer): """ Layer class of Position Embedding Position Embedding was used in Personalized Re-ranking Model :title:`Changhua Pei et al, 2019`[1], which is to add a trainable tensors per position to the session-based embedding features tensor. :Reference: `Changhua Pei et al, 2019. Personalized Re-ranking for Recommendation <https://arxiv.org/abs/1904.06813>`_. """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs': ('B', 'L', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'L', 'E')} def __init__(self, max_num_position: 'int'): """ Initialize PositionEmbedding Args: max_num_position (int): maximum number of position in a sequence """ super().__init__() self.bias = nn.Parameter(torch.Tensor(1, max_num_position, 1)) nn.init.normal_(self.bias) def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
p768lwy3/torecsys
PositionEmbeddingLayer
false
16,218
[ "MIT" ]
92
2251366268b4fbe6f8c3ab1628fa72a0db043dcd
https://github.com/p768lwy3/torecsys/tree/2251366268b4fbe6f8c3ab1628fa72a0db043dcd
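The PositionEmbeddingLayer record is the simplest case: for the documented (B, L, E) input with bias of shape (1, L, 1), the forward is output[b, l, e] = input[b, l, e] + bias[0, l, 0], one learned scalar per position broadcast over the batch and embedding dimensions. For the 4 x 4 x 4 x 4 sample input the bias broadcasts along dim 2, which is exactly what the index arithmetic x1 = xindex // 4 % 4 in triton_poi_fused_add_0 selects.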
UniformBatchMiner
import torch import torch.nn as nn import torch.utils.data from typing import Any from typing import Dict from typing import List from typing import Tuple from abc import ABC from abc import abstractmethod class BaseMiner(nn.Module, ABC): def __init__(self, *args: List[Any], **kwargs: Dict[str, Any]): super().__init__() @abstractmethod def forward(self, anchor: 'torch.Tensor', target: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor, torch.Tensor]: raise NotImplementedError class UniformBatchMiner(BaseMiner): def __init__(self, sample_size: 'int'): super().__init__() self.sample_size = sample_size def forward(self, anchor: 'torch.Tensor', target: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: batch_size = target.size(0) rand_idx = torch.randint(0, batch_size, (self.sample_size * batch_size,)) neg_samples = target[rand_idx].unsqueeze(1) pos_samples = target.unsqueeze(1) anchor = anchor.unsqueeze(1) repeated = torch.repeat_interleave(anchor, self.sample_size, dim=0) pos = torch.cat([anchor, pos_samples], dim=1) neg = torch.cat([repeated, neg_samples], dim=1) return pos, neg def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'sample_size': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data from typing import Any from typing import Dict from typing import List from typing import Tuple from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 % 2 x0 = xindex % 64 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 2 x0 = xindex % 64 x2 = xindex // 128 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * (x2 // 4)), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + x2, tmp6, eviction_policy='evict_last', other=0.0) tmp10 = tl.full([XBLOCK], 4, tl.int32) tmp11 = tmp9 + tmp10 tmp12 = tmp9 < 0 tmp13 = tl.where(tmp12, tmp11, tmp9) tl.device_assert((0 <= tl.broadcast_to(tmp13, [XBLOCK])) & (tl. broadcast_to(tmp13, [XBLOCK]) < 4) | ~tmp6, 'index out of bounds: 0 <= tl.broadcast_to(tmp13, [XBLOCK]) < 4') tmp15 = tl.load(in_ptr2 + (x0 + 64 * tmp13), tmp6, other=0.0) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x4, tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4, 4), (128, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg1_1, arg0_1, buf0, 512, XBLOCK =256, num_warps=4, num_stages=1) buf1 = torch.ops.aten.randint.low(0, 4, [16], device=device(type= 'cuda', index=0), pin_memory=False) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((16, 2, 4, 4, 4), (128, 64, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(2048)](arg1_1, buf2, arg0_1, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf2 return buf0, buf3 class BaseMiner(nn.Module, ABC): def __init__(self, *args: List[Any], **kwargs: Dict[str, Any]): super().__init__() @abstractmethod def forward(self, anchor: 'torch.Tensor', target: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor, torch.Tensor]: raise NotImplementedError class UniformBatchMinerNew(BaseMiner): def __init__(self, sample_size: 'int'): super().__init__() self.sample_size = sample_size def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
p768lwy3/torecsys
UniformBatchMiner
false
16,219
[ "MIT" ]
92
2251366268b4fbe6f8c3ab1628fa72a0db043dcd
https://github.com/p768lwy3/torecsys/tree/2251366268b4fbe6f8c3ab1628fa72a0db043dcd
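The UniformBatchMiner record returns two stacked tensors: the positive pairs keep the original batch, while the negatives repeat each anchor sample_size times and pair it with uniformly re-drawn targets. A quick shape check with the record's sample sizes (B = 4, sample_size = 4), assuming the eager UniformBatchMiner class from the record is in scope; the variable names below are illustrative only:

    import torch

    miner = UniformBatchMiner(sample_size=4)
    anchor = torch.rand(4, 4, 4, 4)
    target = torch.rand(4, 4, 4, 4)
    pos, neg = miner(anchor, target)
    print(pos.shape)  # torch.Size([4, 2, 4, 4, 4])
    print(neg.shape)  # torch.Size([16, 2, 4, 4, 4])

These are exactly the buffers the two fused cat kernels materialise: buf0 with shape (4, 2, 4, 4, 4) and buf3 with shape (16, 2, 4, 4, 4).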
ModulatedConv2d
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: 
factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style, input_is_stylespace=False): batch, in_channel, height, width = input.shape if not input_is_stylespace: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out, style def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 
2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0 ), primals_4, primals_5, reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0), buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), ( 256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, 
blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_3 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
ozmig77/StyleCLIP-1
ModulatedConv2d
false
16,220
[ "MIT" ]
2,732
57b887bba971ef86c107f4805785ce44fca3efef
https://github.com/ozmig77/StyleCLIP-1/tree/57b887bba971ef86c107f4805785ce44fca3efef
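The ModulatedConv2d record implements StyleGAN2-style weight modulation. With per-sample style s = modulation(style), shared weight w of shape (out, in, k, k) and scale = 1/\sqrt{\text{in\_channel} \cdot \text{kernel\_size}^2}, the effective per-sample filters are

    w'_{b,o,i,j,k} = \text{scale} \cdot w_{o,i,j,k} \cdot s_{b,i}, \qquad
    d_{b,o} = \Bigl(\sum_{i,j,k} (w'_{b,o,i,j,k})^2 + 10^{-8}\Bigr)^{-1/2}, \qquad
    \hat{w}_{b,o,i,j,k} = w'_{b,o,i,j,k}\, d_{b,o},

after which the batch is folded into the group dimension so a single grouped conv2d applies a different filter bank to every sample. The fused kernel triton_per_fused_add_mul_pow_rsqrt_sum_2 performs the scaling, squaring, reduction, rsqrt and rescale in one pass before the extern convolution call.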
Policy
import torch import torch.nn as nn import torch.nn.functional as F class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) self.action_head = nn.Linear(128, 2) self.value_head = nn.Linear(128, 1) self.saved_actions = [] self.rewards = [] def forward(self, x): x = F.relu(self.affine1(x)) action_scores = self.action_head(x) state_values = self.value_head(x) return F.softmax(action_scores), state_values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 128), (128, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 
512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) triton_poi_fused__softmax_1[grid(128)](buf2, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf6, primals_6, primals_4, buf7 class PolicyNew(nn.Module): def __init__(self): super(PolicyNew, self).__init__() self.affine1 = nn.Linear(4, 128) self.action_head = nn.Linear(128, 2) self.value_head = nn.Linear(128, 1) self.saved_actions = [] self.rewards = [] def forward(self, input_0): primals_1 = self.affine1.weight primals_2 = self.affine1.bias primals_4 = self.action_head.weight primals_5 = self.action_head.bias primals_6 = self.value_head.weight primals_7 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
nosyndicate/PyTorchRL
Policy
false
16,221
[ "MIT" ]
48
c4fb69ffebaa7f56b4210388f9eea7d42ca853e4
https://github.com/nosyndicate/PyTorchRL/tree/c4fb69ffebaa7f56b4210388f9eea7d42ca853e4
FieldEachTypeBilinear
import math import torch import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class FieldEachTypeBilinear(BaseLayer): """ Applies a bilinear transformation to the incoming data: :math:`y = x_1 \\cdot W \\odot x_2 + b` Args: in_features: size of first dimension in first input sample and second input sample in1_features: size of each first input sample in2_features: size of each second input sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\\text{in1\\_features}` and :math:`*` means any number of additional dimensions. All but the last dimension of the inputs should be the same - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\\text{in2\\_features}` - Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\\text{out\\_features}` and all but the last dimension are the same shape as the input Examples:: >>> m = FieldAllTypeBilinear(20, 20) >>> input1 = torch.randn(128, 10, 20) >>> input2 = torch.randn(128, 10, 3) >>> output = m(input1, input2) >>> print(output.size()) torch.Size([128, 10, 3]) """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs1': ('B', 'NC2', 'E'), 'inputs2': ('B', 'NC2', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'NC2', 'E')} __constants__ = ['in_features', 'in1_features', 'in2_features', 'bias'] def __init__(self, in_features, in1_features, in2_features, bias=True): super(FieldEachTypeBilinear, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.weight = nn.Parameter(torch.Tensor(in_features, in1_features, in2_features)) if bias: self.bias = nn.Parameter(torch.Tensor(in_features, in2_features)) else: self.register_parameter('bias', nn.Parameter(torch.tensor([0]))) self.reset_parameters() def reset_parameters(self): bound = 1 / math.sqrt(self.weight.shape[0]) nn.init.uniform_(self.weight, -bound, bound) if self.bias is not None: nn.init.uniform_(self.bias, -bound, bound) def forward(self, input1, input2): output = torch.matmul(input1.unsqueeze(-2), self.weight).squeeze(-2) output = torch.mul(output, input2) if self.bias is not None: output += self.bias return output def extra_repr(self): return ( f'in1_features={self.in1_features}, in2_features={self.in2_features}, bias={self.bias is not None}' ) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'in1_features': 4, 'in2_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(1024)](primals_2, buf0, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (64, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), out=buf1) del buf0 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_mul_1[grid(256)](buf2, primals_3, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf2, primals_3, reinterpret_tensor(primals_1, (64, 4, 1), (4, 1, 4), 0) class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class FieldEachTypeBilinearNew(BaseLayer): """ Applies a bilinear transformation to the incoming data: :math:`y = x_1 \\cdot W \\odot x_2 + b` Args: in_features: size of first dimension in first input sample and second input sample in1_features: size of each first input sample in2_features: size of each second input sample bias: If set to False, the layer will not learn an additive bias. 
Default: ``True`` Shape: - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\\text{in1\\_features}` and :math:`*` means any number of additional dimensions. All but the last dimension of the inputs should be the same - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\\text{in2\\_features}` - Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\\text{out\\_features}` and all but the last dimension are the same shape as the input Examples:: >>> m = FieldAllTypeBilinear(20, 20) >>> input1 = torch.randn(128, 10, 20) >>> input2 = torch.randn(128, 10, 3) >>> output = m(input1, input2) >>> print(output.size()) torch.Size([128, 10, 3]) """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs1': ('B', 'NC2', 'E'), 'inputs2': ('B', 'NC2', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'NC2', 'E')} __constants__ = ['in_features', 'in1_features', 'in2_features', 'bias'] def __init__(self, in_features, in1_features, in2_features, bias=True): super(FieldEachTypeBilinearNew, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.weight = nn.Parameter(torch.Tensor(in_features, in1_features, in2_features)) if bias: self.bias = nn.Parameter(torch.Tensor(in_features, in2_features)) else: self.register_parameter('bias', nn.Parameter(torch.tensor([0]))) self.reset_parameters() def reset_parameters(self): bound = 1 / math.sqrt(self.weight.shape[0]) nn.init.uniform_(self.weight, -bound, bound) if self.bias is not None: nn.init.uniform_(self.bias, -bound, bound) def extra_repr(self): return ( f'in1_features={self.in1_features}, in2_features={self.in2_features}, bias={self.bias is not None}' ) def forward(self, input_0, input_1): primals_2 = self.weight primals_4 = self.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
p768lwy3/torecsys
FieldEachTypeBilinear
false
16,222
[ "MIT" ]
92
2251366268b4fbe6f8c3ab1628fa72a0db043dcd
https://github.com/p768lwy3/torecsys/tree/2251366268b4fbe6f8c3ab1628fa72a0db043dcd
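The two kernels in the FieldEachTypeBilinear record above (a broadcasted clone feeding a bmm, then a fused multiply-add) compute the per-field bilinear product y = (x1 · W_f) ⊙ x2 + b. A small eager sketch of that identity, using the same 4×4×4×4 shapes as the record's get_inputs(); the einsum form is only an alternative spelling for illustration, not code from the repo.

import torch

B, N, NF, E = 4, 4, 4, 4
x1 = torch.rand(B, N, NF, E)            # get_inputs()[0]
x2 = torch.rand(B, N, NF, E)            # get_inputs()[1]
W = torch.rand(NF, E, E)                # (in_features, in1_features, in2_features)
b = torch.rand(NF, E)                   # (in_features, in2_features)

# Eager reference, as in FieldEachTypeBilinear.forward:
ref = torch.matmul(x1.unsqueeze(-2), W).squeeze(-2) * x2 + b

# The same contraction written as one einsum over the field axis f:
alt = torch.einsum('bnfi,fij->bnfj', x1, W) * x2 + b
assert torch.allclose(ref, alt, atol=1e-5)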
ToRGB
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: 
factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style, input_is_stylespace=False): batch, in_channel, height, width = input.shape if not input_is_stylespace: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out, style class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None, input_is_stylespace=False): out, style = self.conv(input, style, input_is_stylespace= input_is_stylespace) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out, style def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf5, reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0 ), primals_4, primals_5, reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): 
super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style, input_is_stylespace=False): batch, in_channel, height, width = input.shape if not input_is_stylespace: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out, style class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
ozmig77/StyleCLIP-1
ToRGB
false
16,223
[ "MIT" ]
2,732
57b887bba971ef86c107f4805785ce44fca3efef
https://github.com/ozmig77/StyleCLIP-1/tree/57b887bba971ef86c107f4805785ce44fca3efef
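In the ToRGB record above, call() applies the per-sample modulated 1×1 convolution by reshaping the input to (1, batch*in_channel, H, W) and the modulated weights to (batch*out_channel, in_channel, 1, 1) with groups=batch, the same grouped-convolution trick the eager ModulatedConv2d uses. A minimal sketch of that equivalence with hypothetical shapes (no demodulation, matching demodulate=False in ToRGB):

import torch
import torch.nn.functional as F

B, Cin, Cout, H, W, k = 4, 4, 3, 4, 4, 1
x = torch.rand(B, Cin, H, W)
weight = torch.rand(1, Cout, Cin, k, k)      # shared kernel
style = torch.rand(B, 1, Cin, 1, 1)          # per-sample modulation
w = weight * style                           # (B, Cout, Cin, k, k)

# Reference: one conv2d per sample with its own modulated weight.
ref = torch.cat([F.conv2d(x[i:i + 1], w[i]) for i in range(B)], dim=0)

# Grouped-conv trick used by the generated kernel: fold batch into channels.
out = F.conv2d(x.reshape(1, B * Cin, H, W),
               w.reshape(B * Cout, Cin, k, k),
               groups=B).reshape(B, Cout, H, W)
assert torch.allclose(ref, out, atol=1e-5)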
SelfAttention
import torch import torch.nn as nn import torch.nn.functional as F def mask_(matrices, maskval=0.0, mask_diagonal=True): _b, h, w = matrices.size() indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1) matrices[:, indices[0], indices[1]] = maskval class SelfAttention(nn.Module): def __init__(self, emb, heads=8, mask=False): super().__init__() self.emb = emb self.heads = heads self.mask = mask self.tokeys = nn.Linear(emb, emb * heads, bias=False) self.toqueries = nn.Linear(emb, emb * heads, bias=False) self.tovalues = nn.Linear(emb, emb * heads, bias=False) self.unifyheads = nn.Linear(heads * emb, emb) def forward(self, x, mask): b, t, e = x.size() h = self.heads keys = self.tokeys(x).view(b, t, h, e) queries = self.toqueries(x).view(b, t, h, e) values = self.tovalues(x).view(b, t, h, e) keys = keys.transpose(1, 2).contiguous().view(b * h, t, e) queries = queries.transpose(1, 2).contiguous().view(b * h, t, e) values = values.transpose(1, 2).contiguous().view(b * h, t, e) queries = queries / e ** (1 / 4) keys = keys / e ** (1 / 4) dot = torch.bmm(queries, keys.transpose(1, 2)) assert dot.size() == (b * h, t, t) if self.mask: mask_(dot, maskval=float('-inf'), mask_diagonal=False) if mask is not None: dot = dot.masked_fill(mask == 0, -1000000000.0) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, e) out = out.transpose(1, 2).contiguous().view(b, t, h * e) return self.unifyheads(out) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'emb': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 32 x2 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 8) + 32 * x2 + 128 * (x1 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 8) + 32 * x1 + 128 * (x2 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp5 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp13 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = -1000000000.0 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tmp16 = tmp3 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp6 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp10 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x3 = xindex % 16 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp4 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = -1000000000.0 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tl.store(in_out_ptr0 + x4, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 8 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 8 x2 = xindex // 32 % 4 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_transpose_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 128 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (32, 4), (4, 1)) assert_size_stride(primals_3, (32, 4), (4, 1)) assert_size_stride(primals_4, (32, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((32, 4, 4), (4, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(512)](buf1, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_div_1[grid(512)](buf0, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_eq_2[grid(16)](primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((32, 4, 1), (4, 1, 128), torch.float32) buf8 = empty_strided_cuda((32, 4, 1), (4, 1, 128), torch.float32) 
triton_poi_fused__softmax_masked_fill_3[grid(128)](buf6, buf5, buf7, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) buf9 = buf5 del buf5 triton_poi_fused__softmax_masked_fill_4[grid(512)](buf9, buf6, buf7, buf8, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del buf8 buf10 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32 ) triton_poi_fused_clone_5[grid(512)](buf2, buf10, 512, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf9, reinterpret_tensor(buf10, (32, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32 ) triton_poi_fused_clone_6[grid(512)](buf11, buf12, 512, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (16, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf13) del primals_7 buf14 = reinterpret_tensor(buf11, (32, 4, 4), (16, 1, 4), 0) del buf11 triton_poi_fused_transpose_7[grid(512)](buf3, buf14, 512, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf6, buf9, reinterpret_tensor(buf12, (16, 32), (32, 1), 0 ), primals_6, reinterpret_tensor(buf10, (32, 4, 4), (16, 1, 4), 0 ), buf14, buf4 def mask_(matrices, maskval=0.0, mask_diagonal=True): _b, h, w = matrices.size() indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1) matrices[:, indices[0], indices[1]] = maskval class SelfAttentionNew(nn.Module): def __init__(self, emb, heads=8, mask=False): super().__init__() self.emb = emb self.heads = heads self.mask = mask self.tokeys = nn.Linear(emb, emb * heads, bias=False) self.toqueries = nn.Linear(emb, emb * heads, bias=False) self.tovalues = nn.Linear(emb, emb * heads, bias=False) self.unifyheads = nn.Linear(heads * emb, emb) def forward(self, input_0, input_1): primals_2 = self.tokeys.weight primals_3 = self.toqueries.weight primals_4 = self.tovalues.weight primals_6 = self.unifyheads.weight primals_7 = self.unifyheads.bias primals_1 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ouyangshixiong/UPDeT
SelfAttention
false
16,224
[ "MIT" ]
90
e6010ff8a8a3ce064900f3f040a9a34218c97e0e
https://github.com/ouyangshixiong/UPDeT/tree/e6010ff8a8a3ce064900f3f040a9a34218c97e0e
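The constant 0.7071067811865475 hard-coded in triton_poi_fused_div_0/1 of the SelfAttention record above is e ** -(1/4) for emb e = 4: scaling queries and keys separately by e ** -0.25, as the eager forward does, yields the same logits as dividing q·k^T once by sqrt(e), i.e. standard scaled dot-product attention. A quick check of that identity (shapes only illustrative):

import torch

e = 4
q = torch.rand(32, 4, e)          # (b * heads, t, e), as in the record
k = torch.rand(32, 4, e)

logits_split = torch.bmm(q / e ** 0.25, (k / e ** 0.25).transpose(1, 2))
logits_once = torch.bmm(q, k.transpose(1, 2)) / e ** 0.5
assert torch.allclose(logits_split, logits_once, atol=1e-6)
print(1 / e ** 0.25)              # ~0.7071067811865475, the kernel constant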
BertOutput
from _paritybench_helpers import _mock_config import torch import torch.nn import torch.nn as nn class BertOutput(nn.Module): """BERT output layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1 class BertOutputNew(nn.Module): """BERT output layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Project-MONAI/MONAI
BertOutput
false
16,225
[ "Apache-2.0" ]
2,971
2bab12c67c3cc1d54a4847628ce1e879064be11c
https://github.com/Project-MONAI/MONAI/tree/2bab12c67c3cc1d54a4847628ce1e879064be11c
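The two layer-norm kernels in the BertOutput record above use the standard two-pass split: triton_poi_fused_native_layer_norm_1 produces the per-row mean and rsqrt(var + 1e-12), and ..._2 applies (x - mean) * rstd * weight + bias. A short eager sketch of that split, checked against F.layer_norm (shapes only illustrative):

import torch
import torch.nn.functional as F

hidden = 4
x = torch.rand(64, hidden)               # 64 rows of hidden_size = 4, as in the record
weight, bias = torch.rand(hidden), torch.rand(hidden)
eps = 1e-12

mean = x.mean(dim=-1, keepdim=True)                                    # pass 1
rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + eps)
out = (x - mean) * rstd * weight + bias                                # pass 2

ref = F.layer_norm(x, (hidden,), weight, bias, eps)
assert torch.allclose(out, ref, atol=1e-6)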
StyledConv
import math import torch from torch import nn from torch.nn import functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size 
self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style, input_is_stylespace=False): batch, in_channel, height, width = input.shape if not input_is_stylespace: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out, style class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConv(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input, style, noise=None, input_is_stylespace=False): out, style = self.conv(input, style, input_is_stylespace= input_is_stylespace) out = self.noise(out, noise=noise) out = self.activate(out) return out, style def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 25 x2 = xindex // 100 x1 = xindex // 25 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = 1.4142135623730951 tmp14 = tmp12 * tmp13 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32) buf8 = torch.ops.aten.normal_functional.default(buf7) del buf7 buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32 ) triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6, buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_6 del primals_7 return buf11, reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0 ), primals_4, primals_5, reinterpret_tensor(buf2, (4, 1, 4, 1, 1), (4, 4, 1, 1, 1), 0), buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), ( 256, 16, 4, 1), 0), buf9, buf10 def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): rest_dim = [1] * (input.ndim - bias.ndim - 1) input = input if input.ndim == 3: return F.leaky_relu(input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope) * scale else: return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope) * scale def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :] out = out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, 
out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1 ], pad[0], pad[1]) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class FusedLeakyReLU(nn.Module): def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) self.negative_slope = negative_slope self.scale = scale def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self .scale) class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style, input_is_stylespace=False): batch, in_channel, height, width = input.shape if not input_is_stylespace: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out, style class NoiseInjection(nn.Module): def __init__(self): super().__init__() self.weight = nn.Parameter(torch.zeros(1)) def forward(self, image, noise=None): if noise is None: batch, _, height, width = image.shape noise = image.new_empty(batch, 1, height, width).normal_() return image + self.weight * noise class StyledConvNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True): super().__init__() self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim, upsample=upsample, blur_kernel=blur_kernel, demodulate=demodulate) self.noise = NoiseInjection() self.activate = FusedLeakyReLU(out_channel) def forward(self, input_0, input_1): primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_6 = self.noise.weight primals_7 = self.activate.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
ozmig77/StyleCLIP-1
StyledConv
false
16,226
[ "MIT" ]
2,732
57b887bba971ef86c107f4805785ce44fca3efef
https://github.com/ozmig77/StyleCLIP-1/tree/57b887bba971ef86c107f4805785ce44fca3efef
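Editorial sketch (not part of the dataset row above): the modulate/demodulate step of ModulatedConv2d from the StyledConv record, written in plain PyTorch. The record's triton_per_fused_add_mul_pow_rsqrt_sum_2 kernel fuses exactly this math; the batch, channel, and kernel sizes below are illustrative and are not read from the record.

import torch
import torch.nn.functional as F

batch, in_ch, out_ch, k = 4, 4, 4, 3
scale = 1.0 / (in_ch * k * k) ** 0.5

weight = torch.randn(1, out_ch, in_ch, k, k)        # shared convolution weight
style = torch.randn(batch, 1, in_ch, 1, 1)          # per-sample style (output of EqualLinear)

w = scale * weight * style                           # modulate
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-8)  # per-(sample, out-channel) norm
w = w * demod.view(batch, out_ch, 1, 1, 1)           # demodulate
w = w.view(batch * out_ch, in_ch, k, k)

# Grouped-convolution trick from the record: fold the batch dimension into conv groups.
x = torch.randn(batch, in_ch, 8, 8)
out = F.conv2d(x.view(1, batch * in_ch, 8, 8), w, padding=k // 2, groups=batch)
out = out.view(batch, out_ch, 8, 8)
print(out.shape)  # torch.Size([4, 4, 8, 8])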
FieldAllTypeBilinear
import math import torch import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class FieldAllTypeBilinear(BaseLayer): """ Applies a bilinear transformation to the incoming data: :math:`y = x_1 \\cdot W \\odot x_2 + b` Args: in1_features: size of each first input sample in2_features: size of each second input sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\\text{in1\\_features}` and :math:`*` means any number of additional dimensions. All but the last dimension of the inputs should be the same - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\\text{in2\\_features}`. - Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\\text{out\\_features}` and all but the last dimension are the same shape as the input Examples:: >>> m = FieldAllTypeBilinear(20, 20) >>> input1 = torch.randn(128, 10, 20) >>> input2 = torch.randn(128, 10, 3) >>> output = m(input1, input2) >>> print(output.size()) torch.Size([128, 10, 3]) """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs1': ('B', 'NC2', 'E'), 'inputs2': ('B', 'NC2', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'NC2', 'E')} __constants__ = ['in1_features', 'in2_features', 'bias'] def __init__(self, in1_features, in2_features, bias=True): super(FieldAllTypeBilinear, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.weight = nn.Parameter(torch.Tensor(in1_features, in2_features)) if bias: self.bias = nn.Parameter(torch.Tensor(in2_features)) else: self.register_parameter('bias', nn.Parameter(torch.tensor([0]))) self.reset_parameters() def reset_parameters(self): bound = 1 / math.sqrt(self.weight.shape[0]) nn.init.uniform_(self.weight, -bound, bound) if self.bias is not None: nn.init.uniform_(self.bias, -bound, bound) def forward(self, input1, input2): output = torch.mul(torch.matmul(input1, self.weight), input2) if self.bias is not None: output += self.bias return output def extra_repr(self): return ( f'in1_features={self.in1_features}, in2_features={self.in2_features}, bias={self.bias is not None}' ) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in1_features': 4, 'in2_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn import torch.utils.data from typing import Dict from typing import Tuple from abc import ABC from abc import abstractmethod assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_3, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 return buf1, primals_3, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class BaseLayer(nn.Module, ABC): """ Base Layer for the torecsys module """ def __init__(self, **kwargs): """ Initializer for BaseLayer Args: **kwargs: kwargs """ super(BaseLayer, self).__init__() @property @abstractmethod def inputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get inputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of inputs_size """ raise NotImplementedError('not implemented') @property @abstractmethod def outputs_size(self) ->Dict[str, Tuple[str, ...]]: """ Get outputs size of the layer Returns: Dict[str, Tuple[str, ...]]: dictionary of outputs_size """ raise NotImplementedError('not implemented') class FieldAllTypeBilinearNew(BaseLayer): """ Applies a bilinear transformation to the incoming data: :math:`y = x_1 \\cdot W \\odot x_2 + b` Args: in1_features: size of each first input sample in2_features: size of each second input sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\\text{in1\\_features}` and :math:`*` means any number of additional dimensions. All but the last dimension of the inputs should be the same - Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\\text{in2\\_features}`. 
- Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\\text{out\\_features}` and all but the last dimension are the same shape as the input Examples:: >>> m = FieldAllTypeBilinear(20, 20) >>> input1 = torch.randn(128, 10, 20) >>> input2 = torch.randn(128, 10, 3) >>> output = m(input1, input2) >>> print(output.size()) torch.Size([128, 10, 3]) """ @property def inputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'inputs1': ('B', 'NC2', 'E'), 'inputs2': ('B', 'NC2', 'E')} @property def outputs_size(self) ->Dict[str, Tuple[str, ...]]: return {'outputs': ('B', 'NC2', 'E')} __constants__ = ['in1_features', 'in2_features', 'bias'] def __init__(self, in1_features, in2_features, bias=True): super(FieldAllTypeBilinearNew, self).__init__() self.in1_features = in1_features self.in2_features = in2_features self.weight = nn.Parameter(torch.Tensor(in1_features, in2_features)) if bias: self.bias = nn.Parameter(torch.Tensor(in2_features)) else: self.register_parameter('bias', nn.Parameter(torch.tensor([0]))) self.reset_parameters() def reset_parameters(self): bound = 1 / math.sqrt(self.weight.shape[0]) nn.init.uniform_(self.weight, -bound, bound) if self.bias is not None: nn.init.uniform_(self.bias, -bound, bound) def extra_repr(self): return ( f'in1_features={self.in1_features}, in2_features={self.in2_features}, bias={self.bias is not None}' ) def forward(self, input_0, input_1): primals_1 = self.weight primals_4 = self.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
p768lwy3/torecsys
FieldAllTypeBilinear
false
16,227
[ "MIT" ]
92
2251366268b4fbe6f8c3ab1628fa72a0db043dcd
https://github.com/p768lwy3/torecsys/tree/2251366268b4fbe6f8c3ab1628fa72a0db043dcd
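Editorial sketch (not part of the dataset row above): the forward pass of the FieldAllTypeBilinear record is output = (input1 @ W) * input2 + b, which is what the fused triton_poi_fused_add_mul_0 kernel computes after the extern mm. Shapes and the init bound follow the record's get_inputs()/get_init_inputs() and reset_parameters().

import math
import torch

in1_features = in2_features = 4
input1 = torch.rand(4, 4, 4, 4)
input2 = torch.rand(4, 4, 4, 4)

bound = 1 / math.sqrt(in1_features)                  # same uniform init as reset_parameters()
W = torch.empty(in1_features, in2_features).uniform_(-bound, bound)
b = torch.empty(in2_features).uniform_(-bound, bound)

output = torch.matmul(input1, W) * input2 + b        # matmul, elementwise mul, bias add
print(output.shape)  # torch.Size([4, 4, 4, 4])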
ComplexCnnQAHead
import torch from torch import nn class ComplexCnnQAHead(nn.Module): def __init__(self, input_size): super().__init__() self.relu = nn.ReLU() self.conv_1 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=1, padding=0) self.conv_3 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=3, padding=1) self.conv_5 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=5, padding=2) self.fc = nn.Linear(768, 2) def forward(self, x): x = x.transpose(1, 2).contiguous() conv1_out = self.relu(self.conv_1(x).transpose(1, 2).contiguous(). squeeze(-1)) conv3_out = self.relu(self.conv_3(x).transpose(1, 2).contiguous(). squeeze(-1)) conv5_out = self.relu(self.conv_5(x).transpose(1, 2).contiguous(). squeeze(-1)) output = self.fc(torch.cat((conv1_out, conv3_out, conv5_out), -1)) return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 768 x1 = xindex // 768 % 4 x2 = xindex // 3072 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 4 * x0 + 1024 * x2), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 512, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr2 + (x1 + 4 * (-256 + x0) + 1024 * x2), tmp15, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr3 + (-256 + x0), tmp15, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp8, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tmp0 >= tmp13 tl.full([1], 768, tl.int64) tmp25 = tl.load(in_ptr4 + (x1 + 4 * (-512 + x0) + 1024 * x2), tmp22, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr5 + (-512 + x0), tmp22, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 + tmp26 tmp28 = triton_helpers.maximum(tmp8, tmp27) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp22, tmp28, tmp29) tmp31 = tl.where(tmp15, tmp21, tmp30) tmp32 = tl.where(tmp4, tmp11, tmp31) tl.store(out_ptr0 + x4, tmp32, None) @triton.jit def triton_poi_fused_clone_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 1024 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 
tl.store(out_ptr0 + (x2 + 256 * y3), tmp6, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (256, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 4, 5), (20, 5, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (2, 768), (768, 1)) assert_size_stride(primals_9, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 4), (1024, 4, 1)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 4), (1024, 4, 1)) buf3 = extern_kernels.convolution(buf0, primals_6, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 4), (1024, 4, 1)) buf4 = empty_strided_cuda((4, 4, 768), (3072, 768, 1), torch.float32) triton_poi_fused_cat_1[grid(12288)](buf1, primals_3, buf2, primals_5, buf3, primals_7, buf4, 12288, XBLOCK=128, num_warps= 4, num_stages=1) buf5 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (16, 768), (768, 1), 0), reinterpret_tensor(primals_8, (768, 2), (1, 768), 0), alpha=1, beta=1, out=buf5) del primals_9 buf6 = empty_strided_cuda((4, 4, 256), (1024, 256, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 256)](buf3, primals_7, buf6, 16, 256, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf3 del primals_7 buf7 = empty_strided_cuda((4, 4, 256), (1024, 256, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 256)](buf2, primals_5, buf7, 16, 256, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf2 del primals_5 buf8 = empty_strided_cuda((4, 4, 256), (1024, 256, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 256)](buf1, primals_3, buf8, 16, 256, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf1 del primals_3 return reinterpret_tensor(buf5, (4, 4, 2), (8, 2, 1), 0 ), primals_2, primals_4, primals_6, buf0, reinterpret_tensor(buf4, (16, 768), (768, 1), 0), primals_8, buf6, buf7, buf8 class ComplexCnnQAHeadNew(nn.Module): def __init__(self, input_size): super().__init__() self.relu = nn.ReLU() self.conv_1 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=1, padding=0) self.conv_3 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=3, padding=1) self.conv_5 = nn.Conv1d(in_channels=input_size, out_channels=256, kernel_size=5, padding=2) self.fc = nn.Linear(768, 2) def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_1.bias primals_4 = self.conv_3.weight primals_5 = self.conv_3.bias primals_6 = self.conv_5.weight primals_7 = self.conv_5.bias primals_8 = self.fc.weight primals_9 = self.fc.bias primals_1 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
park-sungmoo/odqa_baseline_code
ComplexCnnQAHead
false
16,228
[ "Apache-2.0" ]
67
45954be766e5f987bef18e5b8a2e47f1508742cd
https://github.com/park-sungmoo/odqa_baseline_code/tree/45954be766e5f987bef18e5b8a2e47f1508742cd
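Editorial sketch (not part of the dataset row above): a shape walk-through of the ComplexCnnQAHead record on its (4, 4, 4) test input. Three parallel Conv1d branches (kernel sizes 1/3/5 with "same"-style padding, 256 channels each) are concatenated to 768 features per token and projected to 2 logits, which is why fc is Linear(768, 2).

import torch
from torch import nn

batch, seq_len, hidden = 4, 4, 4
x = torch.rand(batch, seq_len, hidden)

conv_1 = nn.Conv1d(hidden, 256, kernel_size=1, padding=0)
conv_3 = nn.Conv1d(hidden, 256, kernel_size=3, padding=1)
conv_5 = nn.Conv1d(hidden, 256, kernel_size=5, padding=2)
fc = nn.Linear(768, 2)

xt = x.transpose(1, 2)                                         # (batch, hidden, seq_len)
branches = [torch.relu(c(xt).transpose(1, 2)) for c in (conv_1, conv_3, conv_5)]
out = fc(torch.cat(branches, dim=-1))                          # (batch, seq_len, 2)
print(out.shape)  # torch.Size([4, 4, 2])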
GlobalDiscriminator
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class GlobalDiscriminator(nn.Module): def __init__(self, y_size, M_channels): super().__init__() self.c0 = nn.Conv2d(M_channels, 64, kernel_size=3) self.c1 = nn.Conv2d(64, 32, kernel_size=3) self.avgpool = nn.AdaptiveAvgPool2d(16) self.l0 = nn.Linear(32 * 16 * 16 + y_size, 256) self.l1 = nn.Linear(256, 256) self.l2 = nn.Linear(256, 1) def forward(self, y, M): h = F.relu(self.c0(M)) h = self.c1(h) h = self.avgpool(h) h = h.view(M.shape[0], -1) h = torch.cat((y, h), dim=1) h = F.relu(self.l0(h)) h = F.relu(self.l1(h)) return self.l2(h) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'y_size': 4, 'M_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 984064 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 3600 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x2 = xindex // 256 x4 = xindex tmp0 = 15 * x1 // 4 tmp1 = (75 + 60 * x1) // 16 tmp2 = tmp0 < tmp1 tmp3 = 15 * x0 // 4 tmp4 = (75 + 60 * x0) // 16 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp6, eviction_policy='evict_last', other=0.0) tmp8 = 1 + 15 * x0 // 4 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp10, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 2 + 15 * x0 // 4 tmp14 = tmp13 < tmp4 tmp15 = tmp2 & tmp14 tmp16 = tl.load(in_ptr0 + (2 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp15, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = 3 + 15 * x0 // 4 tmp19 = tmp18 < tmp4 tmp20 = tmp2 & tmp19 tmp21 = tl.load(in_ptr0 + (3 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp20, eviction_policy='evict_last', other=0.0) tmp22 = tmp21 + tmp17 tmp23 = 4 + 15 * x0 // 4 tmp24 = tmp23 < tmp4 tmp25 = tmp2 & tmp24 tmp26 = tl.load(in_ptr0 + (4 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp25, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 + tmp22 tmp28 = 1 + 15 * x1 // 4 tmp29 = tmp28 < tmp1 tmp30 = tmp29 & tmp5 tmp31 = tl.load(in_ptr0 + (60 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp30, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 + tmp27 tmp33 = tmp29 & tmp9 tmp34 = tl.load(in_ptr0 + (61 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp33, eviction_policy='evict_last', other=0.0) tmp35 = tmp34 + tmp32 tmp36 = tmp29 & tmp14 tmp37 = tl.load(in_ptr0 + (62 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp36, eviction_policy='evict_last', other=0.0) tmp38 = tmp37 + tmp35 tmp39 = tmp29 & tmp19 tmp40 = tl.load(in_ptr0 + (63 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp39, 
eviction_policy='evict_last', other=0.0) tmp41 = tmp40 + tmp38 tmp42 = tmp29 & tmp24 tmp43 = tl.load(in_ptr0 + (64 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp42, eviction_policy='evict_last', other=0.0) tmp44 = tmp43 + tmp41 tmp45 = 2 + 15 * x1 // 4 tmp46 = tmp45 < tmp1 tmp47 = tmp46 & tmp5 tmp48 = tl.load(in_ptr0 + (120 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp47, eviction_policy='evict_last', other=0.0) tmp49 = tmp48 + tmp44 tmp50 = tmp46 & tmp9 tmp51 = tl.load(in_ptr0 + (121 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp50, eviction_policy='evict_last', other=0.0) tmp52 = tmp51 + tmp49 tmp53 = tmp46 & tmp14 tmp54 = tl.load(in_ptr0 + (122 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp53, eviction_policy='evict_last', other=0.0) tmp55 = tmp54 + tmp52 tmp56 = tmp46 & tmp19 tmp57 = tl.load(in_ptr0 + (123 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp56, eviction_policy='evict_last', other=0.0) tmp58 = tmp57 + tmp55 tmp59 = tmp46 & tmp24 tmp60 = tl.load(in_ptr0 + (124 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp59, eviction_policy='evict_last', other=0.0) tmp61 = tmp60 + tmp58 tmp62 = 3 + 15 * x1 // 4 tmp63 = tmp62 < tmp1 tmp64 = tmp63 & tmp5 tmp65 = tl.load(in_ptr0 + (180 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp64, eviction_policy='evict_last', other=0.0) tmp66 = tmp65 + tmp61 tmp67 = tmp63 & tmp9 tmp68 = tl.load(in_ptr0 + (181 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp67, eviction_policy='evict_last', other=0.0) tmp69 = tmp68 + tmp66 tmp70 = tmp63 & tmp14 tmp71 = tl.load(in_ptr0 + (182 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp70, eviction_policy='evict_last', other=0.0) tmp72 = tmp71 + tmp69 tmp73 = tmp63 & tmp19 tmp74 = tl.load(in_ptr0 + (183 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp73, eviction_policy='evict_last', other=0.0) tmp75 = tmp74 + tmp72 tmp76 = tmp63 & tmp24 tmp77 = tl.load(in_ptr0 + (184 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp76, eviction_policy='evict_last', other=0.0) tmp78 = tmp77 + tmp75 tmp79 = 4 + 15 * x1 // 4 tmp80 = tmp79 < tmp1 tmp81 = tmp80 & tmp5 tmp82 = tl.load(in_ptr0 + (240 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp81, eviction_policy='evict_last', other=0.0) tmp83 = tmp82 + tmp78 tmp84 = tmp80 & tmp9 tmp85 = tl.load(in_ptr0 + (241 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp84, eviction_policy='evict_last', other=0.0) tmp86 = tmp85 + tmp83 tmp87 = tmp80 & tmp14 tmp88 = tl.load(in_ptr0 + (242 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp87, eviction_policy='evict_last', other=0.0) tmp89 = tmp88 + tmp86 tmp90 = tmp80 & tmp19 tmp91 = tl.load(in_ptr0 + (243 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp90, eviction_policy='evict_last', other=0.0) tmp92 = tmp91 + tmp89 tmp93 = tmp80 & tmp24 tmp94 = tl.load(in_ptr0 + (244 + 60 * (15 * x1 // 4) + 3600 * x2 + 15 * x0 // 4), tmp93, eviction_policy='evict_last', other=0.0) tmp95 = tmp94 + tmp92 tmp96 = 1.0 tmp97 = tl.full(tmp96.shape, 0.0, tmp96.dtype) tmp98 = tl.where(tmp6, tmp96, tmp97) tmp99 = tl.where(tmp10, tmp96, tmp97) tmp100 = tmp99 + tmp98 tmp101 = tl.where(tmp15, tmp96, tmp97) tmp102 = tmp101 + tmp100 tmp103 = tl.where(tmp20, tmp96, tmp97) tmp104 = tmp103 + tmp102 tmp105 = tl.where(tmp25, tmp96, tmp97) tmp106 = tmp105 + tmp104 tmp107 = tl.where(tmp30, tmp96, tmp97) tmp108 = tmp107 + tmp106 tmp109 = tl.where(tmp33, tmp96, tmp97) tmp110 = tmp109 + tmp108 tmp111 = tl.where(tmp36, tmp96, tmp97) tmp112 = tmp111 + tmp110 tmp113 = tl.where(tmp39, tmp96, 
tmp97) tmp114 = tmp113 + tmp112 tmp115 = tl.where(tmp42, tmp96, tmp97) tmp116 = tmp115 + tmp114 tmp117 = tl.where(tmp47, tmp96, tmp97) tmp118 = tmp117 + tmp116 tmp119 = tl.where(tmp50, tmp96, tmp97) tmp120 = tmp119 + tmp118 tmp121 = tl.where(tmp53, tmp96, tmp97) tmp122 = tmp121 + tmp120 tmp123 = tl.where(tmp56, tmp96, tmp97) tmp124 = tmp123 + tmp122 tmp125 = tl.where(tmp59, tmp96, tmp97) tmp126 = tmp125 + tmp124 tmp127 = tl.where(tmp64, tmp96, tmp97) tmp128 = tmp127 + tmp126 tmp129 = tl.where(tmp67, tmp96, tmp97) tmp130 = tmp129 + tmp128 tmp131 = tl.where(tmp70, tmp96, tmp97) tmp132 = tmp131 + tmp130 tmp133 = tl.where(tmp73, tmp96, tmp97) tmp134 = tmp133 + tmp132 tmp135 = tl.where(tmp76, tmp96, tmp97) tmp136 = tmp135 + tmp134 tmp137 = tl.where(tmp81, tmp96, tmp97) tmp138 = tmp137 + tmp136 tmp139 = tl.where(tmp84, tmp96, tmp97) tmp140 = tmp139 + tmp138 tmp141 = tl.where(tmp87, tmp96, tmp97) tmp142 = tmp141 + tmp140 tmp143 = tl.where(tmp90, tmp96, tmp97) tmp144 = tmp143 + tmp142 tmp145 = tl.where(tmp93, tmp96, tmp97) tmp146 = tmp145 + tmp144 tmp147 = tmp95 / tmp146 tl.store(out_ptr0 + x4, tmp147, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8196 x1 = xindex // 8196 tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8196, tl.int64) tmp9 = tl.load(in_ptr1 + (8192 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x0 + 8224 * x1), tmp10, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (256, 8196), (8196, 1)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (256, 256), (256, 1)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (1, 256), (256, 1)) assert_size_stride(primals_12, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 62, 62), (246016, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(984064)](buf1, primals_2, 984064, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = 
extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 60, 60), (115200, 3600, 60, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(460800)](buf3, primals_5, 460800, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) triton_poi_fused__adaptive_avg_pool2d_2[grid(32768)](buf3, buf4, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 8196), (8224, 1), torch.float32) triton_poi_fused_cat_3[grid(32784)](primals_6, buf4, buf5, 32784, XBLOCK=512, num_warps=4, num_stages=1) del buf4 del primals_6 buf6 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (8196, 256), (1, 8196), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_4[grid(1024)](buf7, primals_8, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_8 buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_9, (256, 256), ( 1, 256), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(1024)](buf9, primals_10, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_10 buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_12, buf9, reinterpret_tensor( primals_11, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf11) del primals_12 return (buf11, primals_1, primals_3, primals_4, buf1, buf3, buf5, buf7, buf9, primals_11, primals_9, primals_7) class GlobalDiscriminatorNew(nn.Module): def __init__(self, y_size, M_channels): super().__init__() self.c0 = nn.Conv2d(M_channels, 64, kernel_size=3) self.c1 = nn.Conv2d(64, 32, kernel_size=3) self.avgpool = nn.AdaptiveAvgPool2d(16) self.l0 = nn.Linear(32 * 16 * 16 + y_size, 256) self.l1 = nn.Linear(256, 256) self.l2 = nn.Linear(256, 1) def forward(self, input_0, input_1): primals_1 = self.c0.weight primals_2 = self.c0.bias primals_4 = self.c1.weight primals_5 = self.c1.bias primals_7 = self.l0.weight primals_8 = self.l0.bias primals_9 = self.l1.weight primals_10 = self.l1.bias primals_11 = self.l2.weight primals_12 = self.l2.bias primals_6 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
neuralsyn/self-supervised-relational-reasoning
GlobalDiscriminator
false
16,229
[ "MIT" ]
130
6ecfafcf4a36c2eacef7ddd5bd1b23c28fbb14c8
https://github.com/neuralsyn/self-supervised-relational-reasoning/tree/6ecfafcf4a36c2eacef7ddd5bd1b23c28fbb14c8
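Editorial sketch (not part of the dataset row above): a shape trace for the GlobalDiscriminator record with its test inputs y of shape (4, 4) and M of shape (4, 4, 64, 64). It shows where the 8196 input width of l0 comes from: 32 * 16 * 16 pooled conv features concatenated with the y_size=4 vector.

import torch
from torch import nn
import torch.nn.functional as F

y = torch.rand(4, 4)
M = torch.rand(4, 4, 64, 64)

c0 = nn.Conv2d(4, 64, kernel_size=3)        # -> (4, 64, 62, 62)
c1 = nn.Conv2d(64, 32, kernel_size=3)       # -> (4, 32, 60, 60)
avgpool = nn.AdaptiveAvgPool2d(16)          # -> (4, 32, 16, 16)

h = avgpool(c1(F.relu(c0(M)))).view(M.shape[0], -1)    # (4, 8192)
h = torch.cat((y, h), dim=1)                           # (4, 8196) = 32*16*16 + y_size
print(h.shape)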
AE
import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.modules.loss class AE(nn.Module): """ Autoencoder for dimensional reduction""" def __init__(self, dim): super(AE, self).__init__() self.dim = dim self.fc1 = nn.Linear(dim, 512) self.fc2 = nn.Linear(512, 128) self.fc3 = nn.Linear(128, 512) self.fc4 = nn.Linear(512, dim) def encode(self, x): h1 = F.relu(self.fc1(x)) return F.relu(self.fc2(h1)) def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.relu(self.fc4(h3)) def forward(self, x): z = self.encode(x.view(-1, self.dim)) return self.decode(z), z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F import torch.nn as nn import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (512, 4), (4, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (128, 512), (512, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (512, 128), (128, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (4, 512), (512, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 512), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(32768)](buf1, primals_3, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 128), ( 1, 512), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(8192)](buf3, primals_5, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (128, 512), ( 1, 128), 0), out=buf4) buf5 = buf4 del buf4 
triton_poi_fused_relu_0[grid(32768)](buf5, primals_7, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (512, 4), (1, 512), 0), out=buf6) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((64, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf7, primals_9, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return buf7, buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, buf8, primals_8, primals_6, primals_4 class AENew(nn.Module): """ Autoencoder for dimensional reduction""" def __init__(self, dim): super(AENew, self).__init__() self.dim = dim self.fc1 = nn.Linear(dim, 512) self.fc2 = nn.Linear(512, 128) self.fc3 = nn.Linear(128, 512) self.fc4 = nn.Linear(512, dim) def encode(self, x): h1 = F.relu(self.fc1(x)) return F.relu(self.fc2(h1)) def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.relu(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
peterfeifanchen/scGNN
AE
false
16,230
[ "MIT" ]
60
4ef9013ad0f44f9f51708e9bb60e5138f5706593
https://github.com/peterfeifanchen/scGNN/tree/4ef9013ad0f44f9f51708e9bb60e5138f5706593
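Editorial sketch (not part of the dataset row above): the AE record flattens its (4, 4, 4, 4) test input to (-1, dim) with dim=4, encodes to a 128-d latent, and decodes back; the module returns both the reconstruction and the latent.

import torch
from torch import nn
import torch.nn.functional as F

dim = 4
x = torch.rand(4, 4, 4, 4).view(-1, dim)              # (64, 4)

fc1, fc2 = nn.Linear(dim, 512), nn.Linear(512, 128)   # encoder
fc3, fc4 = nn.Linear(128, 512), nn.Linear(512, dim)   # decoder

z = F.relu(fc2(F.relu(fc1(x))))                        # latent, (64, 128)
recon = torch.relu(fc4(F.relu(fc3(z))))                # reconstruction, (64, 4)
print(recon.shape, z.shape)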
MaskL1Loss
import torch import torch.nn as nn class MaskL1Loss(nn.Module): """ Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss """ def __init__(self, ratio=1): super(MaskL1Loss, self).__init__() self.criterion = nn.L1Loss() self.ratio = ratio def forward(self, generated_img, target_img, mask): pose_mask_l1 = self.criterion(generated_img * mask, target_img * mask) return self.criterion(generated_img, target_img ) + pose_mask_l1 * self.ratio def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp8 = tmp0 * tmp7 tmp9 = tmp1 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.abs(tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp6 / tmp15 tmp17 = tmp14 / tmp15 tmp18 = 1.0 tmp19 = tmp17 * tmp18 tmp20 = tmp16 + tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mean_mul_sub_0[grid(1)](buf2, arg0_1, arg2_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class MaskL1LossNew(nn.Module): """ Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss """ def __init__(self, ratio=1): super(MaskL1LossNew, self).__init__() self.criterion = nn.L1Loss() self.ratio = ratio def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
pasan1992/Human-Pose-Transfer
MaskL1Loss
false
16,231
[ "MIT" ]
64
a7febc632d4fbf627ba05740d2048accb25575f2
https://github.com/pasan1992/Human-Pose-Transfer/tree/a7febc632d4fbf627ba05740d2048accb25575f2
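Editorial sketch (not part of the dataset row above): the MaskL1Loss record is an ordinary L1 loss plus a pose-mask L1 term scaled by ratio; the single Triton kernel in the optimised code folds both mean-abs reductions into one pass. A reference computation on the record's (4, 4, 4, 4) test tensors:

import torch
import torch.nn.functional as F

generated, target, mask = (torch.rand(4, 4, 4, 4) for _ in range(3))
ratio = 1

# L1(generated, target) + ratio * L1(generated * mask, target * mask)
loss = F.l1_loss(generated, target) + ratio * F.l1_loss(generated * mask, target * mask)
print(loss.item())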
BertMixedLayer
from _paritybench_helpers import _mock_config import math import torch import torch.nn import torch.nn as nn class BertAttention(nn.Module): """BERT attention layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, context): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = self.dropout(nn.Softmax(dim=-1)(attention_scores)) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertOutput(nn.Module): """BERT output layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertMixedLayer(nn.Module): """BERT cross attention layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.att = BertAttention(config) self.output = BertOutput(config) def forward(self, x, y): output = self.att(x, y) return self.output(output, x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(num_attention_heads=4, hidden_size= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + 
x2, tmp35, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-12 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_10, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_10 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3, buf12, buf13, primals_11, primals_12, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_12 
return buf14, primals_3, primals_11, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_9 class BertAttention(nn.Module): """BERT attention layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, context): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_probs = self.dropout(nn.Softmax(dim=-1)(attention_scores)) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertOutput(nn.Module): """BERT output layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertMixedLayerNew(nn.Module): """BERT cross attention layer. Based on: BERT (pytorch-transformer) https://github.com/huggingface/transformers """ def __init__(self, config) ->None: super().__init__() self.att = BertAttention(config) self.output = BertOutput(config) def forward(self, input_0, input_1): primals_1 = self.att.query.weight primals_2 = self.att.query.bias primals_4 = self.att.key.weight primals_5 = self.att.key.bias primals_7 = self.att.value.weight primals_8 = self.att.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
Project-MONAI/MONAI
BertMixedLayer
false
16,232
[ "Apache-2.0" ]
2,971
2bab12c67c3cc1d54a4847628ce1e879064be11c
https://github.com/Project-MONAI/MONAI/tree/2bab12c67c3cc1d54a4847628ce1e879064be11c
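A minimal smoke-test sketch for the BertMixedLayer entry above, assuming the generated BertMixedLayerNew class (with BertAttention and BertOutput) is in scope and a CUDA device is available; the SimpleNamespace below is a hypothetical stand-in for the real BERT config object, sized to match the (4, 4) weight shapes asserted in call().

import types
import torch

# Hypothetical minimal config; only the fields read by BertAttention/BertOutput are set.
cfg = types.SimpleNamespace(num_attention_heads=4, hidden_size=4,
                            attention_probs_dropout_prob=0.0,
                            hidden_dropout_prob=0.0)
layer = BertMixedLayerNew(cfg).cuda()
hidden_states = torch.rand(4, 4, 4, device='cuda')
context = torch.rand(4, 4, 4, device='cuda')
out = layer(hidden_states, context)
print(out.shape)  # expected: torch.Size([4, 4, 4])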
CBOWClassifier
import torch from torch import nn class CBOWClassifier(nn.Module): """ Continuous bag of words classifier. """ def __init__(self, hidden_size, input_size, max_pool, dropout=0.5): """ :param hidden_size: :param input_size: :param max_pool: if true then max pool over word embeddings, else sum word embeddings """ super(CBOWClassifier, self).__init__() self.hidden_size = hidden_size self.input_size = input_size self.max_pool = max_pool self.dropout = nn.Dropout(p=dropout) self.i2h = nn.Linear(self.input_size, self.hidden_size) self.h2o = nn.Linear(self.hidden_size, 1) self.sigmoid = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, x): if self.max_pool: encoding = nn.functional.max_pool1d(x.transpose(1, 2), x.shape[1]) encoding = encoding.transpose(1, 2).squeeze() else: encoding = x.sum(1) encoding = self.dropout(encoding) hidden = self.tanh(self.dropout(self.i2h(encoding))) out = self.sigmoid(self.h2o(hidden)) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'input_size': 4, 'max_pool': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 1), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_sigmoid_2[grid(4)](buf4, primals_5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 return buf4, reinterpret_tensor(buf0, (4, 4), (4, 1), 0 ), buf2, buf4, primals_4 class CBOWClassifierNew(nn.Module): """ Continuous bag of words classifier. 
""" def __init__(self, hidden_size, input_size, max_pool, dropout=0.5): """ :param hidden_size: :param input_size: :param max_pool: if true then max pool over word embeddings, else sum word embeddings """ super(CBOWClassifierNew, self).__init__() self.hidden_size = hidden_size self.input_size = input_size self.max_pool = max_pool self.dropout = nn.Dropout(p=dropout) self.i2h = nn.Linear(self.input_size, self.hidden_size) self.h2o = nn.Linear(self.hidden_size, 1) self.sigmoid = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, input_0): primals_2 = self.i2h.weight primals_3 = self.i2h.bias primals_4 = self.h2o.weight primals_5 = self.h2o.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
nyu-mll/CoLA-baselines
CBOWClassifier
false
16,233
[ "MIT" ]
54
dd095d3646ed05a315280aaa8ed4ec84ba435b3e
https://github.com/nyu-mll/CoLA-baselines/tree/dd095d3646ed05a315280aaa8ed4ec84ba435b3e
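A minimal check for the CBOWClassifier entry above, assuming both CBOWClassifier and CBOWClassifierNew are in scope and a CUDA device is available (the generated kernels only run on GPU). Weights are copied across so the eager and Triton-compiled forward passes can be compared in eval mode (dropout disabled).

import torch

ref = CBOWClassifier(hidden_size=4, input_size=4, max_pool=4).cuda().eval()
opt = CBOWClassifierNew(hidden_size=4, input_size=4, max_pool=4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # same weights, so outputs should match
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    # Should print True if the compiled graph reproduces the eager module.
    print(torch.allclose(ref(x), opt(x), atol=1e-5))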
DHead
import torch import torch.nn as nn from math import * class DHead(nn.Module): def __init__(self): super().__init__() self.conv = nn.Conv2d(256, 1, 4) def forward(self, x): output = torch.sigmoid(self.conv(x)) return output def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from math import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 14884 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 61, 61), (3721, 3721, 61, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0[grid(14884)](buf1, primals_2, 14884, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf1 class DHeadNew(nn.Module): def __init__(self): super().__init__() self.conv = nn.Conv2d(256, 1, 4) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pengyuzhang97/NIID-Bench
DHead
false
16,234
[ "MIT" ]
124
235b6f5c2bf218a587f9effae346a2f616de1855
https://github.com/pengyuzhang97/NIID-Bench/tree/235b6f5c2bf218a587f9effae346a2f616de1855
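A quick equivalence sketch for the DHead entry, under the assumption that DHead and DHeadNew as defined above are in scope and CUDA is available.

import torch

ref = DHead().cuda()
opt = DHeadNew().cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 256, 64, 64, device='cuda')
with torch.no_grad():
    out_ref, out_opt = ref(x), opt(x)
print(out_ref.shape)  # expected: torch.Size([4, 1, 61, 61])
print(torch.allclose(out_ref, out_opt, atol=1e-5))  # should print True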
IcosahedronUnpool
import math import torch from torch import nn import torch.nn.functional as F class IcosahedronUnpool(nn.Module): """Isocahedron Unpooling, consists in adding 1 values to match the desired un pooling size """ def forward(self, x): """Forward calculates the subset of pixels that will result from the unpooling kernel_size and then adds 1 valued pixels to match this size Args: x (:obj:`torch.tensor`) : [batch x pixels x features] Returns: [:obj:`torch.tensor`]: [batch x pixels unpooled x features] """ M = x.size(1) order = int(math.log((M - 2) / 10) / math.log(4)) unpool_order = order + 1 additional_pixels = int(10 * math.pow(4, unpool_order) + 2) subset_pixels_add = additional_pixels - M return F.pad(x, (0, 0, 0, subset_pixels_add, 0, 0), 'constant', value=1 ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 12 x2 = xindex // 48 x3 = xindex % 48 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=1.0) tl.store(out_ptr0 + x4, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 12, 4), (192, 48, 4, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(768)](arg0_1, buf0, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class IcosahedronUnpoolNew(nn.Module): """Isocahedron Unpooling, consists in adding 1 values to match the desired un pooling size """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phil-hawkins/deepsphere-pytorch
IcosahedronUnpool
false
16,235
[ "MIT" ]
99
f23c531445b3ddf234c7e98cdadb010163051e6d
https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
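A sketch of how the IcosahedronUnpool pair can be exercised, assuming the two classes above are in scope and CUDA is available; both modules are parameter-free, so no weight copying is needed.

import torch

ref = IcosahedronUnpool()
opt = IcosahedronUnpoolNew()
x = torch.rand(4, 4, 4, 4, device='cuda')  # the sample input from get_inputs()
out_ref, out_opt = ref(x), opt(x)
print(out_ref.shape)                  # expected: torch.Size([4, 4, 12, 4])
print(torch.equal(out_ref, out_opt))  # should print True (both pad with the value 1.0)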
MultiHeadedAttention
import math import torch from typing import Optional from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'=torch.empty(0)) ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). 1.When applying cross attention between decoder and encoder, the batch padding mask for input is in (#batch, 1, T) shape. 2.When applying self attention of encoder, the mask is in (#batch, T, T) shape. 3.When applying self attention of decoder, the mask is in (#batch, L, L) shape. 4.If the different position in decoder see different block of the encoder, such as Mocha, the passed in mask could be in (#batch, L, T) shape. But there is no such case in current Wenet. Returns: torch.Tensor: Output tensor (#batch, time1, d_model). 
""" q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Optional from typing import Tuple from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x3, tmp20, xmask) tl.store(out_ptr1 + x3, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + x5, xmask) tmp6 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(out_ptr0 + x5, tmp12, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, 
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf6, buf5, buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttentionNew(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). 
Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_11 = self.linear_out.weight primals_12 = self.linear_out.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
pengchengguo/wenet
MultiHeadedAttention
false
16,236
[ "Apache-2.0" ]
1,166
940dc164e5cfa9b8c0131688f0f9457af9563892
https://github.com/pengchengguo/wenet/tree/940dc164e5cfa9b8c0131688f0f9457af9563892
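A comparison sketch for the MultiHeadedAttention entry, assuming MultiHeadedAttention and MultiHeadedAttentionNew above are in scope and CUDA is available. An all-ones mask is used so that no position is masked out, and eval() disables dropout to match the compiled (inference) graph.

import torch

ref = MultiHeadedAttention(n_head=4, n_feat=4, dropout_rate=0.5).cuda().eval()
opt = MultiHeadedAttentionNew(n_head=4, n_feat=4, dropout_rate=0.5).cuda().eval()
opt.load_state_dict(ref.state_dict())
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
v = torch.rand(4, 4, 4, device='cuda')
mask = torch.ones(4, 4, 4, device='cuda')  # nonzero everywhere, so nothing is masked
with torch.no_grad():
    # Should print True if the compiled attention matches the eager module.
    print(torch.allclose(ref(q, k, v, mask), opt(q, k, v, mask), atol=1e-5))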
AppendCLSToken
import math import torch from torch import Tensor import torch.nn as nn class BaseEmbeddingLayer(nn.Module): def _apply_initialization(self, x: 'Tensor', d: 'int', method: 'str' ) ->None: d_sqrt_inv = 1 / math.sqrt(d) if method == 'uniform': nn.init.uniform_(x, a=-d_sqrt_inv, b=d_sqrt_inv) elif method == 'normal': nn.init.normal_(x, std=d_sqrt_inv) else: raise ValueError(f'initialization: {method} is not implemented') class AppendCLSToken(BaseEmbeddingLayer): def __init__(self, d_token: 'int', initialization: 'str'='uniform') ->None: super().__init__() self.weight = nn.Parameter(Tensor(d_token)) self._apply_initialization(self.weight, d_token, initialization) def forward(self, x: 'Tensor') ->Tensor: assert x.ndim == 3 return torch.cat([x, self.weight.view(1, 1, -1).repeat(len(x), 1, 1 )], dim=1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_token': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x0 = xindex % 4 x2 = xindex // 20 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(80)](primals_1, primals_2, buf0, 80, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class BaseEmbeddingLayer(nn.Module): def _apply_initialization(self, x: 'Tensor', d: 'int', method: 'str' ) ->None: d_sqrt_inv = 1 / math.sqrt(d) if method == 'uniform': nn.init.uniform_(x, a=-d_sqrt_inv, b=d_sqrt_inv) elif method == 'normal': nn.init.normal_(x, std=d_sqrt_inv) else: raise ValueError(f'initialization: {method} is not implemented') class AppendCLSTokenNew(BaseEmbeddingLayer): def __init__(self, d_token: 'int', initialization: 'str'='uniform') ->None: super().__init__() self.weight = nn.Parameter(Tensor(d_token)) self._apply_initialization(self.weight, d_token, initialization) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
pfnet-research/deep-table
AppendCLSToken
false
16,237
[ "MIT" ]
48
a19c0c3048484017d5f24806604c3b3470bcf550
https://github.com/pfnet-research/deep-table/tree/a19c0c3048484017d5f24806604c3b3470bcf550
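A short check for the AppendCLSToken entry, assuming AppendCLSToken and AppendCLSTokenNew above are in scope and CUDA is available; the CLS weight is copied so the concatenation results can be compared exactly.

import torch

ref = AppendCLSToken(d_token=4).cuda()
opt = AppendCLSTokenNew(d_token=4).cuda()
opt.load_state_dict(ref.state_dict())  # copy the randomly initialised CLS vector
x = torch.rand(4, 4, 4, device='cuda')
out_ref, out_opt = ref(x), opt(x)
print(out_ref.shape)                  # expected: torch.Size([4, 5, 4])
print(torch.equal(out_ref, out_opt))  # should print True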
CnnQAHead
import torch from torch import nn class CnnQAHead(nn.Module): def __init__(self, input_size): super().__init__() self.conv_1 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=1, padding=0) self.conv_3 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=3, padding=1) self.conv_5 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=5, padding=2) self.relu = nn.ReLU() def forward(self, x): x = x.transpose(1, 2).contiguous() conv1_out = self.relu(self.conv_1(x).transpose(1, 2).contiguous(). squeeze(-1)) conv3_out = self.relu(self.conv_3(x).transpose(1, 2).contiguous(). squeeze(-1)) conv5_out = self.relu(self.conv_5(x).transpose(1, 2).contiguous(). squeeze(-1)) x = conv1_out + conv3_out + conv5_out return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_clone_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 2 y1 = yindex // 2 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x2 + 4 * y3), xmask & ymask, eviction_policy ='evict_last') tmp11 = tl.load(in_ptr5 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp7 = tmp5 + tmp6 tmp8 = triton_helpers.maximum(tmp3, tmp7) tmp9 = tmp4 + tmp8 tmp12 = tmp10 + tmp11 tmp13 = triton_helpers.maximum(tmp3, tmp12) tmp14 = tmp9 + tmp13 tl.store(out_ptr0 + (y0 + 2 * x2 + 8 * y1), tmp14, xmask & ymask) @triton.jit def triton_poi_fused_clone_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 8 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 2 * y3), tmp6, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (2, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (2, 4, 5), (20, 5, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 2, 4), (8, 4, 1)) buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4), (8, 4, 1)) buf3 = extern_kernels.convolution(buf0, primals_6, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 2, 4), (8, 4, 1)) buf4 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_add_clone_relu_1[grid(8, 4)](buf1, primals_3, buf2, primals_5, buf3, primals_7, buf4, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 2)](buf3, primals_7, buf5, 16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 2)](buf2, primals_5, buf6, 16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf2 del primals_5 buf7 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.bool) triton_poi_fused_clone_relu_threshold_backward_2[grid(16, 2)](buf1, primals_3, buf7, 16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del buf1 del primals_3 return buf4, primals_2, primals_4, primals_6, buf0, buf5, buf6, buf7 class CnnQAHeadNew(nn.Module): def __init__(self, input_size): super().__init__() self.conv_1 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=1, padding=0) self.conv_3 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=3, padding=1) self.conv_5 = nn.Conv1d(in_channels=input_size, out_channels=2, kernel_size=5, padding=2) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_1.bias primals_4 = self.conv_3.weight primals_5 = self.conv_3.bias primals_6 = self.conv_5.weight primals_7 = self.conv_5.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
park-sungmoo/odqa_baseline_code
CnnQAHead
false
16,238
[ "Apache-2.0" ]
67
45954be766e5f987bef18e5b8a2e47f1508742cd
https://github.com/park-sungmoo/odqa_baseline_code/tree/45954be766e5f987bef18e5b8a2e47f1508742cd
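A comparison sketch for the CnnQAHead entry, assuming CnnQAHead and CnnQAHeadNew above are in scope and CUDA is available.

import torch

ref = CnnQAHead(input_size=4).cuda()
opt = CnnQAHeadNew(input_size=4).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    out_ref, out_opt = ref(x), opt(x)
print(out_ref.shape)  # expected: torch.Size([4, 4, 2]) (two logits per position)
print(torch.allclose(out_ref, out_opt, atol=1e-5))  # should print True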
CoverageAttention
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.nn.functional as F class CoverageAttention(nn.Module): def __init__(self, config: 'SARGConfig'): super(CoverageAttention, self).__init__() self.linear_h = nn.Linear(config.hidden_size, config.hidden_size) self.linear_K = nn.Linear(config.hidden_size, config.hidden_size) self.linear_cov = nn.Linear(1, config.hidden_size) self.v = nn.Linear(config.hidden_size, 1) self.register_buffer('masked_bias', torch.tensor(-10000.0)) def forward(self, h, K, cov, mask=None): """ :param K: (batch_size, src_len, hidden_size) :param h: (batch_size, hidden_size) :param cov: (batch_size, src_len) :param mask: :return: """ h_l = self.linear_h(h.unsqueeze(1)) K_l = self.linear_K(K) c_l = self.linear_cov(cov.unsqueeze(2)) e = self.v(torch.tanh(h_l + K_l + c_l)).squeeze(-1) if mask is not None: e = e + mask * self.masked_bias a = F.softmax(e, dim=-1).unsqueeze(1) out = torch.matmul(a, K).squeeze(1) a = a.squeeze(1) cov = cov + a return out, a, cov def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_out_ptr0 + x4, xmask) tmp8 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = libdevice.tanh(tmp10) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x2, xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp9 + tmp8 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, 1), (1, 1)) 
assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1, 4), (4, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_7, (16, 1), (1, 1), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(64)](buf3, buf0, primals_3, buf1, primals_5, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 del primals_5 del primals_9 buf5 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_11, reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_11 buf6 = buf0 del buf0 triton_poi_fused__softmax_1[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0) del buf5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_add_2[grid(16)](buf6, primals_7, buf7, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 extern_kernels.mm(buf7, primals_6, out=buf8) return buf8, buf7, buf9, primals_6, primals_1, reinterpret_tensor(primals_7 , (16, 1), (1, 1), 0), buf3, buf7, primals_10 class CoverageAttentionNew(nn.Module): def __init__(self, config: 'SARGConfig'): super(CoverageAttentionNew, self).__init__() self.linear_h = nn.Linear(config.hidden_size, config.hidden_size) self.linear_K = nn.Linear(config.hidden_size, config.hidden_size) self.linear_cov = nn.Linear(1, config.hidden_size) self.v = nn.Linear(config.hidden_size, 1) self.register_buffer('masked_bias', torch.tensor(-10000.0)) def forward(self, input_0, input_1, input_2): primals_1 = self.linear_h.weight primals_3 = self.linear_h.bias primals_2 = self.linear_K.weight primals_5 = self.linear_K.bias primals_8 = self.linear_cov.weight primals_9 = self.linear_cov.bias primals_10 = self.v.weight primals_11 = self.v.bias primals_4 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2]
NetEase-GameAI/SARG
CoverageAttention
false
16,239
[ "BSD-3-Clause" ]
53
037085794f10439c4e52f57ab0fa042f35d03f62
https://github.com/NetEase-GameAI/SARG/tree/037085794f10439c4e52f57ab0fa042f35d03f62
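A smoke-test sketch for the CoverageAttention entry that only exercises the Triton-compiled wrapper, assuming CoverageAttentionNew above is in scope and CUDA is available; the SimpleNamespace below is a hypothetical stand-in for the SARGConfig object expected by __init__.

import types
import torch

cfg = types.SimpleNamespace(hidden_size=4)  # hypothetical minimal config
attn = CoverageAttentionNew(cfg).cuda()
h = torch.rand(4, 4, device='cuda')
K = torch.rand(4, 4, device='cuda')
cov = torch.rand(4, 4, device='cuda')
out, a, new_cov = attn(h, K, cov)
print(out.shape, a.shape, new_cov.shape)  # expected: three (4, 4) tensors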
HealpixAvgPool
import torch from torch import nn import torch.nn.functional as F class HealpixAvgPool(nn.AvgPool1d): """Healpix Average pooling module """ def __init__(self): """initialization """ super().__init__(kernel_size=4) def forward(self, x): """forward call the 1d Averagepooling of pytorch Arguments: x (:obj:`torch.tensor`): [batch x pixels x features] Returns: [:obj:`torch.tensor`] : [batch x pooled pixels x features] """ x = x.permute(0, 2, 1) x = F.avg_pool1d(x, self.kernel_size) x = x.permute(0, 2, 1) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), class HealpixAvgPoolNew(nn.AvgPool1d): """Healpix Average pooling module """ def __init__(self): """initialization """ super().__init__(kernel_size=4) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phil-hawkins/deepsphere-pytorch
HealpixAvgPool
false
16,240
[ "MIT" ]
99
f23c531445b3ddf234c7e98cdadb010163051e6d
https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
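A quick check for the HealpixAvgPool entry, assuming HealpixAvgPool and HealpixAvgPoolNew above are in scope and CUDA is available; both modules are parameter-free.

import torch

ref = HealpixAvgPool()
opt = HealpixAvgPoolNew()
x = torch.rand(4, 4, 4, device='cuda')
out_ref, out_opt = ref(x), opt(x)
print(out_ref.shape)                                # expected: torch.Size([4, 1, 4])
print(torch.allclose(out_ref, out_opt, atol=1e-6))  # should print True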
RBFExpansion
import torch import numpy as np import torch.nn as nn class RBFExpansion(nn.Module): """Expand distances between nodes by radial basis functions. .. math:: \\exp(- \\gamma * ||d - \\mu||^2) where :math:`d` is the distance between two nodes and :math:`\\mu` helps centralizes the distances. We use multiple centers evenly distributed in the range of :math:`[\\text{low}, \\text{high}]` with the difference between two adjacent centers being :math:`gap`. The number of centers is decided by :math:`(\\text{high} - \\text{low}) / \\text{gap}`. Choosing fewer centers corresponds to reducing the resolution of the filter. Parameters ---------- low : float Smallest center. Default to 0. high : float Largest center. Default to 30. gap : float Difference between two adjacent centers. :math:`\\gamma` will be computed as the reciprocal of gap. Default to 0.1. """ def __init__(self, low=0.0, high=30.0, gap=0.1): super(RBFExpansion, self).__init__() num_centers = int(np.ceil((high - low) / gap)) self.centers = np.linspace(low, high, num_centers) self.centers = nn.Parameter(torch.tensor(self.centers).float(), requires_grad=False) self.gamma = 1 / gap def reset_parameters(self): """Reinitialize model parameters.""" self.centers = nn.Parameter(self.centers.clone().detach().float(), requires_grad=False) def forward(self, edge_dists): """Expand distances. Parameters ---------- edge_dists : float32 tensor of shape (E, 1) Distances between end nodes of edges, E for the number of edges. Returns ------- float32 tensor of shape (E, len(self.centers)) Expanded distances. """ radial = edge_dists - self.centers coef = -self.gamma return torch.exp(coef * radial ** 2) def get_inputs(): return [torch.rand([4, 4, 4, 300])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_mul_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 300 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = -10.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (300,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 300), (4800, 1200, 300, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 300), (4800, 1200, 300, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_mul_pow_sub_0[grid(19200)](arg1_1, arg0_1, buf0, 19200, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class RBFExpansionNew(nn.Module): """Expand distances between nodes by radial basis functions. .. math:: \\exp(- \\gamma * ||d - \\mu||^2) where :math:`d` is the distance between two nodes and :math:`\\mu` helps centralizes the distances. We use multiple centers evenly distributed in the range of :math:`[\\text{low}, \\text{high}]` with the difference between two adjacent centers being :math:`gap`. The number of centers is decided by :math:`(\\text{high} - \\text{low}) / \\text{gap}`. Choosing fewer centers corresponds to reducing the resolution of the filter. Parameters ---------- low : float Smallest center. Default to 0. high : float Largest center. Default to 30. gap : float Difference between two adjacent centers. :math:`\\gamma` will be computed as the reciprocal of gap. Default to 0.1. """ def __init__(self, low=0.0, high=30.0, gap=0.1): super(RBFExpansionNew, self).__init__() num_centers = int(np.ceil((high - low) / gap)) self.centers = np.linspace(low, high, num_centers) self.centers = nn.Parameter(torch.tensor(self.centers).float(), requires_grad=False) self.gamma = 1 / gap def reset_parameters(self): """Reinitialize model parameters.""" self.centers = nn.Parameter(self.centers.clone().detach().float(), requires_grad=False) def forward(self, input_0): arg0_1 = self.centers arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
padr31/dgl-lifesci
RBFExpansion
false
16,241
[ "Apache-2.0" ]
390
932581468b330862836c0f050077fa33d0eb9405
https://github.com/padr31/dgl-lifesci/tree/932581468b330862836c0f050077fa33d0eb9405
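A comparison sketch for the RBFExpansion entry, assuming RBFExpansion and RBFExpansionNew above are in scope and CUDA is available. Both constructors build the same 300 evenly spaced centers via np.linspace, so no state copying is needed.

import torch

ref = RBFExpansion().cuda()
opt = RBFExpansionNew().cuda()
d = torch.rand(4, 4, 4, 300, device='cuda')  # the sample input shape from get_inputs()
print(torch.allclose(ref(d), opt(d), atol=1e-6))  # should print True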
InfoNCELoss
import torch from torch import Tensor from torch.nn.modules.loss import _Loss def cos_sim_matrix(a: 'Tensor', b: 'Tensor', eps: 'float'=1e-08) ->Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1)) return sim_matrix class InfoNCELoss(_Loss): """Info NCE Loss. A type of contrastive loss function used for self-supervised learning. References: A. Oord, Y. Li, and O. Vinyals, "Representation Learning with Contrastive Predictive Coding," ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> """ def __init__(self, reduction: 'str'='sum') ->None: """ Args: reduction (str) """ super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin: 'Tensor', z_noisy: 'Tensor', t: 'float'=0.7 ) ->Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == 'sum': loss = loss.sum() elif self.reduction == 'mean': loss = loss.mean() return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_per_fused_div_exp_log_neg_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 1.4285714285714286 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp4 * tmp1 tmp6 = tl_math.exp(tmp5) tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 + tmp9 tmp12 = tmp11 * tmp1 tmp13 = tl_math.exp(tmp12) tmp14 = tmp10 + tmp13 tmp16 = tmp15 * tmp1 tmp17 = tl_math.exp(tmp16) tmp18 = tmp14 + tmp17 tmp19 = tmp3 / tmp18 tmp20 = tl_math.log(tmp19) tmp21 = -tmp20 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_clamp_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_div_exp_log_neg_sum_1[grid(1)](buf2, buf3, 1, 4, XBLOCK=1, 
num_warps=2, num_stages=1) del buf2 return buf3, def cos_sim_matrix(a: 'Tensor', b: 'Tensor', eps: 'float'=1e-08) ->Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1)) return sim_matrix class InfoNCELossNew(_Loss): """Info NCE Loss. A type of contrastive loss function used for self-supervised learning. References: A. Oord, Y. Li, and O. Vinyals, "Representation Learning with Contrastive Predictive Coding," ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> """ def __init__(self, reduction: 'str'='sum') ->None: """ Args: reduction (str) """ super().__init__(reduction=reduction) self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pfnet-research/deep-table
InfoNCELoss
false
16,242
[ "MIT" ]
48
a19c0c3048484017d5f24806604c3b3470bcf550
https://github.com/pfnet-research/deep-table/tree/a19c0c3048484017d5f24806604c3b3470bcf550
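A minimal sketch exercising the cos_sim_matrix helper that ships alongside the compiled InfoNCELoss kernels above; it assumes only plain PyTorch on CPU, and the (4, 8) shapes are hypothetical — any pair of 2-D embedding matrices with matching feature dimensions would do:

import torch

a = torch.rand(4, 8)          # 4 query embeddings
b = torch.rand(4, 8)          # 4 key embeddings
sim = cos_sim_matrix(a, b)    # (4, 4) matrix of row-wise cosine similarities
print(sim.shape)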
ChamferLoss
import torch import torch.nn as nn class ChamferLoss(nn.Module): def __init__(self, input_channels, reduction='mean'): super(ChamferLoss, self).__init__() self.input_channels = input_channels def forward(self, x, y): x.shape[0] num_points = x.shape[1] x = x[:, :, :self.input_channels] y = y[:, :, :self.input_channels] xx = torch.bmm(x, x.transpose(2, 1)) yy = torch.bmm(y, y.transpose(2, 1)) zz = torch.bmm(x, y.transpose(2, 1)) indices = torch.arange(0, num_points) rx = xx[:, indices, indices].unsqueeze(1).expand(xx.size()) ry = yy[:, indices, indices].unsqueeze(1).expand(yy.size()) pdist = rx.transpose(2, 1) + ry - 2 * zz loss = torch.mean(pdist.min(1)[0]) + torch.mean(pdist.min(2)[0]) return loss def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_min_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 4 r0 = rindex % 4 r2 = rindex tmp0 = tl.load(in_ptr0 + 16 * r1, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (5 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (r0 + 16 * r1), None) tmp7 = tl.load(in_ptr0 + (5 + 16 * r1), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + r0 + 16 * r1), None) tmp13 = tl.load(in_ptr0 + (10 + 16 * r1), None, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr2 + (8 + r0 + 16 * r1), None) tmp19 = tl.load(in_ptr0 + (15 + 16 * r1), None, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr2 + (12 + r0 + 16 * r1), None) tmp28 = tl.load(in_ptr0 + (5 * r0 + 16 * r1), None, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr1 + 16 * r1, None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + 4 * r2, None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr1 + (5 + 16 * r1), None, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr2 + (1 + 4 * r2), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr1 + (10 + 16 * r1), None, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr2 + (2 + 4 * r2), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr1 + (15 + 16 * r1), None, eviction_policy= 'evict_last') tmp48 = tl.load(in_ptr2 + (3 + 4 * r2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp2 - tmp5 tmp8 = tmp7 + tmp1 tmp10 = tmp9 * tmp4 tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.minimum(tmp6, tmp11) tmp14 = tmp13 + tmp1 tmp16 = tmp15 * tmp4 tmp17 = tmp14 - tmp16 tmp18 = triton_helpers.minimum(tmp12, tmp17) tmp20 = tmp19 + tmp1 tmp22 = tmp21 * tmp4 tmp23 = tmp20 - tmp22 tmp24 = triton_helpers.minimum(tmp18, tmp23) tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp30 = tmp28 + tmp29 tmp32 = tmp31 * tmp4 tmp33 = tmp30 - tmp32 tmp35 = tmp28 + tmp34 tmp37 = tmp36 * tmp4 tmp38 = tmp35 - tmp37 tmp39 = triton_helpers.minimum(tmp33, tmp38) tmp41 = tmp28 + tmp40 tmp43 = tmp42 * tmp4 tmp44 = tmp41 - tmp43 tmp45 = triton_helpers.minimum(tmp39, tmp44) tmp47 = tmp28 + tmp46 tmp49 = tmp48 * tmp4 tmp50 = tmp47 - tmp49 tmp51 = triton_helpers.minimum(tmp45, tmp50) tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK]) tmp54 = tl.sum(tmp52, 1)[:, None] tmp55 = 16.0 tmp56 = tmp27 / tmp55 tmp57 = tmp54 / tmp55 tmp58 = tmp56 + tmp57 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp58, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), 
(16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf2) del arg0_1 del arg1_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 get_raw_stream(0) triton_per_fused_add_mean_min_mul_sub_0[grid(1)](buf7, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf7, class ChamferLossNew(nn.Module): def __init__(self, input_channels, reduction='mean'): super(ChamferLossNew, self).__init__() self.input_channels = input_channels def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pfe-everis/lcd
ChamferLoss
false
16,243
[ "BSD-3-Clause" ]
76
25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
https://github.com/pfe-everis/lcd/tree/25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
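A usage sketch for the ChamferLoss module in the record above, assuming the plain PyTorch definition runs on CPU; the (2, 100, 3) point-cloud shapes are hypothetical and only illustrate the (batch, points, channels) layout the forward pass expects:

import torch

loss_fn = ChamferLoss(input_channels=3)
x = torch.rand(2, 100, 3)     # batch x points x channels
y = torch.rand(2, 100, 3)
loss = loss_fn(x, y)          # scalar: mean squared nearest-neighbour distance, summed over both directions
print(loss.item())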
EquiangularAvgUnpool
import math import torch from torch import nn import torch.nn.functional as F def equiangular_bandwidth(nodes): """Calculate the equiangular bandwidth based on input nodes Args: nodes (int): the number of nodes should be a power of 4 Returns: int: the corresponding bandwidth """ bw = math.sqrt(nodes) / 2 return bw def equiangular_dimension_unpack(nodes, ratio): """Calculate the two underlying dimensions from the total number of nodes Args: nodes (int): combined dimensions ratio (float): ratio between the two dimensions Returns: int, int: separated dimensions """ dim1 = int((nodes / ratio) ** 0.5) dim2 = int((nodes * ratio) ** 0.5) return dim1, dim2 def equiangular_calculator(tensor, ratio): """From a 3D input tensor and a known ratio between the latitude dimension and longitude dimension of the data, reformat the 3D input into a 4D output while also obtaining the bandwidth. Args: tensor (:obj:`torch.tensor`): 3D input tensor ratio (float): the ratio between the latitude and longitude dimension of the data Returns: :obj:`torch.tensor`, int, int: 4D tensor, the bandwidths for lat. and long. """ N, M, F = tensor.size() dim1, dim2 = equiangular_dimension_unpack(M, ratio) bw_dim1 = equiangular_bandwidth(dim1) bw_dim2 = equiangular_bandwidth(dim2) tensor = tensor.view(N, dim1, dim2, F) return tensor, [bw_dim1, bw_dim2] def reformat(x): """Reformat the input from a 4D tensor to a 3D tensor Args: x (:obj:`torch.tensor`): a 4D tensor Returns: :obj:`torch.tensor`: a 3D tensor """ x = x.permute(0, 2, 3, 1) N, D1, D2, Feat = x.size() x = x.view(N, D1 * D2, Feat) return x class EquiangularAvgUnpool(nn.Module): """EquiAngular Average Unpooling version 1 using the interpolate function when unpooling """ def __init__(self, ratio): """Initialization Args: ratio (float): ratio between latitude and longitude dimensions of the data """ self.ratio = ratio self.kernel_size = 4 super().__init__() def forward(self, x): """calls pytorch's interpolate function to create the values while unpooling based on the nearby values Args: x (:obj:`torch.tensor`): batch x pixels x features Returns: :obj:`torch.tensor`: batch x unpooled pixels x features """ x, _ = equiangular_calculator(x, self.ratio) x = x.permute(0, 3, 1, 2) x = F.interpolate(x, scale_factor=(self.kernel_size, self. kernel_size), mode='nearest') x = reformat(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'ratio': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 64 % 4 x1 = xindex // 4 % 16 x0 = xindex % 4 x3 = xindex // 256 x5 = xindex tmp0 = x2 tmp1 = tmp0.to(tl.float32) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp3.to(tl.int32) tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (x0 + 4 * tmp8 + 16 * x3), xmask) tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 1, 64, 4), torch.float32 ) get_raw_stream(0) triton_poi_fused__unsafe_index_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 64, 4), (256, 4, 1), 0), def equiangular_bandwidth(nodes): """Calculate the equiangular bandwidth based on input nodes Args: nodes (int): the number of nodes should be a power of 4 Returns: int: the corresponding bandwidth """ bw = math.sqrt(nodes) / 2 return bw def equiangular_dimension_unpack(nodes, ratio): """Calculate the two underlying dimensions from the total number of nodes Args: nodes (int): combined dimensions ratio (float): ratio between the two dimensions Returns: int, int: separated dimensions """ dim1 = int((nodes / ratio) ** 0.5) dim2 = int((nodes * ratio) ** 0.5) return dim1, dim2 def equiangular_calculator(tensor, ratio): """From a 3D input tensor and a known ratio between the latitude dimension and longitude dimension of the data, reformat the 3D input into a 4D output while also obtaining the bandwidth. Args: tensor (:obj:`torch.tensor`): 3D input tensor ratio (float): the ratio between the latitude and longitude dimension of the data Returns: :obj:`torch.tensor`, int, int: 4D tensor, the bandwidths for lat. and long. """ N, M, F = tensor.size() dim1, dim2 = equiangular_dimension_unpack(M, ratio) bw_dim1 = equiangular_bandwidth(dim1) bw_dim2 = equiangular_bandwidth(dim2) tensor = tensor.view(N, dim1, dim2, F) return tensor, [bw_dim1, bw_dim2] def reformat(x): """Reformat the input from a 4D tensor to a 3D tensor Args: x (:obj:`torch.tensor`): a 4D tensor Returns: :obj:`torch.tensor`: a 3D tensor """ x = x.permute(0, 2, 3, 1) N, D1, D2, Feat = x.size() x = x.view(N, D1 * D2, Feat) return x class EquiangularAvgUnpoolNew(nn.Module): """EquiAngular Average Unpooling version 1 using the interpolate function when unpooling """ def __init__(self, ratio): """Initialization Args: ratio (float): ratio between latitude and longitude dimensions of the data """ self.ratio = ratio self.kernel_size = 4 super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phil-hawkins/deepsphere-pytorch
EquiangularAvgUnpool
false
16,244
[ "MIT" ]
99
f23c531445b3ddf234c7e98cdadb010163051e6d
https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
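An illustrative call of the EquiangularAvgUnpool module above, assuming the helper functions defined with it (equiangular_calculator, reformat) are in scope; the 4 x 4 x 4 input mirrors the record's get_inputs() and is only meant to show how the pixel dimension grows:

import torch

unpool = EquiangularAvgUnpool(ratio=4)
x = torch.rand(4, 4, 4)       # batch x pixels x features
y = unpool(x)
print(y.shape)                # torch.Size([4, 64, 4]): each pixel repeated kernel_size**2 = 16 times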
HealpixMaxPool
import torch from torch import nn import torch.nn.functional as F class HealpixMaxPool(nn.MaxPool1d): """Healpix Maxpooling module """ def __init__(self, return_indices=False): """Initialization """ super().__init__(kernel_size=4, return_indices=return_indices) def forward(self, x): """Forward call the 1d Maxpooling of pytorch Args: x (:obj:`torch.tensor`):[batch x pixels x features] Returns: tuple((:obj:`torch.tensor`), indices (int)): [batch x pooled pixels x features] and indices of pooled pixels """ x = x.permute(0, 2, 1) if self.return_indices: x, indices = F.max_pool1d(x, self.kernel_size) else: x = F.max_pool1d(x, self.kernel_size) x = x.permute(0, 2, 1) if self.return_indices: output = x, indices else: output = x return output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0), class HealpixMaxPoolNew(nn.MaxPool1d): """Healpix Maxpooling module """ def __init__(self, return_indices=False): """Initialization """ super().__init__(kernel_size=4, return_indices=return_indices) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phil-hawkins/deepsphere-pytorch
HealpixMaxPool
false
16,245
[ "MIT" ]
99
f23c531445b3ddf234c7e98cdadb010163051e6d
https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
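A short usage sketch for the HealpixMaxPool module above, assuming plain PyTorch on CPU; the (4, 16, 4) input is hypothetical — the pixel dimension only needs to be divisible by the fixed kernel size of 4:

import torch

pool = HealpixMaxPool()
x = torch.rand(4, 16, 4)      # batch x pixels x features
y = pool(x)
print(y.shape)                # torch.Size([4, 4, 4]): 16 pixels max-pooled down to 4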
Normalization
import numbers import torch import torch.nn as nn class Normalization(nn.Module): """A normalization layer.""" def __init__(self, eps: 'numbers.Real'=1e-15): """Creates a new instance of ``Normalization``. Args: eps (numbers.Real, optional): A tiny number to be added to the standard deviation before re-scaling the centered values. This prevents divide-by-0 errors. By default, this is set to ``1e-15``. """ super().__init__() self._eps = None self.eps = float(eps) @property def eps(self) ->float: """float: A tiny number that is added to the standard deviation before re-scaling the centered values. This prevents divide-by-0 errors. By default, this is set to ``1e-15``. """ return self._eps @eps.setter def eps(self, eps: 'numbers.Real') ->None: if not isinstance(eps, numbers.Real): raise TypeError('<eps> has to be a real number!') self._eps = float(eps) def forward(self, x: 'torch.FloatTensor') ->torch.FloatTensor: """Runs the normalization layer. Args: x (torch.FloatTensor): A tensor to be normalized. To that end, ``x`` is interpreted as a batch of values where normalization is applied over the last of its dimensions. Returns: torch.FloatTensor: The normalized tensor. """ mean = torch.mean(x, dim=-1, keepdim=True) std = torch.std(x, dim=-1, keepdim=True) return (x - mean) / (std + self._eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numbers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-15 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizationNew(nn.Module): """A normalization layer.""" def __init__(self, eps: 'numbers.Real'=1e-15): """Creates a new instance of ``Normalization``. Args: eps (numbers.Real, optional): A tiny number to be added to the standard deviation before re-scaling the centered values. This prevents divide-by-0 errors. By default, this is set to ``1e-15``. """ super().__init__() self._eps = None self.eps = float(eps) @property def eps(self) ->float: """float: A tiny number that is added to the standard deviation before re-scaling the centered values. This prevents divide-by-0 errors. By default, this is set to ``1e-15``. """ return self._eps @eps.setter def eps(self, eps: 'numbers.Real') ->None: if not isinstance(eps, numbers.Real): raise TypeError('<eps> has to be a real number!') self._eps = float(eps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phohenecker/pytorch-transformer
Normalization
false
16,246
[ "BSD-2-Clause" ]
50
211406d82ac04a7b473bcdebda77cc3c2e9af0cf
https://github.com/phohenecker/pytorch-transformer/tree/211406d82ac04a7b473bcdebda77cc3c2e9af0cf
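A quick sanity-check sketch for the Normalization layer above, assuming plain PyTorch; it only verifies the documented behaviour — each slice along the last dimension is centred and divided by its standard deviation:

import torch

norm = Normalization(eps=1e-15)
x = torch.rand(4, 4, 4, 4)
y = norm(x)
print(y.mean(dim=-1).abs().max())   # close to 0
print(y.std(dim=-1).mean())         # close to 1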
VAE
import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.modules.loss class VAE(nn.Module): """ Variational Autoencoder for dimensional reduction""" def __init__(self, dim): super(VAE, self).__init__() self.dim = dim self.fc1 = nn.Linear(dim, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, dim) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, self.dim)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar, z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask) tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (400, 4), (4, 1)) assert_size_stride(primals_3, (400,), (1,)) assert_size_stride(primals_4, (20, 400), (400, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (20, 400), (400, 1)) assert_size_stride(primals_7, (20,), (1,)) assert_size_stride(primals_8, (400, 20), (20, 1)) assert_size_stride(primals_9, (400,), (1,)) assert_size_stride(primals_10, (4, 400), (400, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 400), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(25600)](buf1, primals_3, 25600, XBLOCK =128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 
= torch.ops.aten.randn.default([64, 20], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((64, 20), (20, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(1280)](buf2, buf5, buf3, buf6, 1280, XBLOCK=256, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_0[grid(25600)](buf8, primals_9, 25600, XBLOCK =128, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 4), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_sigmoid_2[grid(256)](buf10, primals_11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 return (buf10, buf2, buf3, buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4) class VAENew(nn.Module): """ Variational Autoencoder for dimensional reduction""" def __init__(self, dim): super(VAENew, self).__init__() self.dim = dim self.fc1 = nn.Linear(dim, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, dim) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc21.weight primals_5 = self.fc21.bias primals_6 = self.fc22.weight primals_7 = self.fc22.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_10 = self.fc4.weight primals_11 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2], output[3]
peterfeifanchen/scGNN
VAE
false
16,247
[ "MIT" ]
60
4ef9013ad0f44f9f51708e9bb60e5138f5706593
https://github.com/peterfeifanchen/scGNN/tree/4ef9013ad0f44f9f51708e9bb60e5138f5706593
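A minimal sketch of calling the VAE module above, assuming plain PyTorch; the batch of 8 vectors is hypothetical — any input reshapeable to (-1, dim) works:

import torch

vae = VAE(dim=4)
x = torch.rand(8, 4)
recon, mu, logvar, z = vae(x)
print(recon.shape, mu.shape, logvar.shape, z.shape)
# torch.Size([8, 4]) torch.Size([8, 20]) torch.Size([8, 20]) torch.Size([8, 20])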
HardTripletLoss
import torch import torch.nn as nn def _pairwise_distance_squared(x, y): xx = torch.sum(torch.pow(x, 2), 1).view(-1, 1) yy = torch.sum(torch.pow(y, 2), 1).view(1, -1) pdist = xx + yy - 2.0 * torch.mm(x, torch.t(y)) return pdist class HardTripletLoss(nn.Module): def __init__(self, margin=0.2, hardest=False): super(HardTripletLoss, self).__init__() self.margin = margin self.hardest = hardest def forward(self, x, y): batch_size = x.shape[0] pdist = _pairwise_distance_squared(x, y) if self.hardest: diag = torch.arange(0, batch_size) diag = diag d_pos = pdist[diag, diag] pdist[diag, diag] = float('Inf') d_neg, _ = torch.min(pdist, 1) loss = torch.clamp(d_pos - d_neg + self.margin, min=0.0) loss = torch.mean(loss) else: diag = torch.arange(0, batch_size) diag = diag d_pos = pdist[diag, diag].unsqueeze(1) loss = d_pos - pdist + self.margin loss = torch.clamp(loss, min=0.0) loss[diag, diag] = 0.0 loss = torch.mean(loss) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tl.store(in_out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_add_clamp_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 5 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tmp0 - tmp1 tmp3 = 0.2 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_index_put_lift_fresh_sub_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + 5 * x0, tmp0, xmask) @triton.jit def triton_per_fused_mean_3(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 16.0 tmp5 = tmp3 / tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_sub_0[grid(16)](buf1, arg0_1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_clamp_sub_1[grid(16)](buf1, buf2, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf1 triton_poi_fused_add_clamp_index_put_lift_fresh_sub_2[grid(4)](buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 triton_per_fused_mean_3[grid(1)](buf5, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf5, def _pairwise_distance_squared(x, y): xx = torch.sum(torch.pow(x, 2), 1).view(-1, 1) yy = torch.sum(torch.pow(y, 2), 1).view(1, -1) pdist = xx + yy - 2.0 * torch.mm(x, torch.t(y)) return pdist class HardTripletLossNew(nn.Module): def __init__(self, margin=0.2, hardest=False): super(HardTripletLossNew, self).__init__() self.margin = margin self.hardest = hardest def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pfe-everis/lcd
HardTripletLoss
false
16,248
[ "BSD-3-Clause" ]
76
25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
https://github.com/pfe-everis/lcd/tree/25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
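A usage sketch for the HardTripletLoss module above, assuming plain PyTorch and that row i of y is the positive example for row i of x; the (4, 8) embedding shapes are hypothetical:

import torch

loss_fn = HardTripletLoss(margin=0.2, hardest=False)
x = torch.rand(4, 8)          # anchor embeddings
y = torch.rand(4, 8)          # positives, aligned row-by-row with x
loss = loss_fn(x, y)          # hardest=True would instead mine the closest negative per anchor
print(loss.item())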
Normalize
import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): norm = torch.norm(x, p=2, dim=self.dim, keepdim=True) return x / norm def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
phuochieu212/PointGLR
Normalize
false
16,249
[ "MIT" ]
104
37017b1af31486aa9d516a3762725a650dca9ad1
https://github.com/phuochieu212/PointGLR/tree/37017b1af31486aa9d516a3762725a650dca9ad1
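A sanity-check sketch for the Normalize module above, assuming plain PyTorch; it confirms that vectors along the chosen dimension come out with unit L2 norm:

import torch

norm = Normalize(dim=4)
x = torch.rand(4, 4, 4, 4, 4)
y = norm(x)
print(y.norm(p=2, dim=4).mean())    # ~1.0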
BertSelfOutput
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class BertSelfOutput(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, hidden_states, input_tensor): if type(hidden_states) == list: hidden_states0 = self.dense0(hidden_states[0]) hidden_states1 = self.dense1(hidden_states[1]) if self.merge: hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1)) else: hidden_states = (hidden_states0 + hidden_states1) / 2 else: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, primals_4, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1 class BertSelfOutputNew(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, input_0, input_1): primals_3 = self.LayerNorm.weight primals_5 = self.LayerNorm.bias primals_2 = self.dense.weight primals_6 = self.dense.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
igor0/BLIP
BertSelfOutput
false
16,250
[ "BSD-3-Clause" ]
473
6d8c3f1e381ed23acb84c55b4adb80e74c08117a
https://github.com/igor0/BLIP/tree/6d8c3f1e381ed23acb84c55b4adb80e74c08117a
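A minimal sketch for the BertSelfOutput module above, run outside the paritybench harness; the SimpleNamespace config is a stand-in assumption for the _mock_config used in the record, and the (4, 4, 4, 4) tensors mirror get_inputs():

import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=4, layer_norm_eps=1e-12, hidden_dropout_prob=0.1)
layer = BertSelfOutput(config)
hidden = torch.rand(4, 4, 4, 4)
residual = torch.rand(4, 4, 4, 4)
out = layer(hidden, residual)      # dense -> dropout -> LayerNorm(hidden + residual)
print(out.shape)                   # same shape as the inputs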
N0reparameterize
import torch from torch import nn as nn import torch.nn.functional as F from torch.distributions import Normal class N0reparameterize(nn.Module): """Reparametrize zero mean Gaussian Variable.""" def __init__(self, input_dim, z_dim, fixed_sigma=None): super().__init__() self.input_dim = input_dim self.z_dim = z_dim self.sigma_linear = nn.Linear(input_dim, z_dim) self.return_means = False if fixed_sigma is not None: self.register_buffer('fixed_sigma', torch.tensor(fixed_sigma)) else: self.fixed_sigma = None self.sigma = None self.z = None def forward(self, x, n=1): if self.fixed_sigma is not None: self.sigma = x.new_full((x.shape[0], self.z_dim), self.fixed_sigma) else: self.sigma = F.softplus(self.sigma_linear(x)) self.z = self.nsample(n=n) return self.z def kl(self): return -0.5 * torch.sum(1 + 2 * self.sigma.log() - self.sigma ** 2, -1) def log_posterior(self): return self._log_posterior(self.z) def _log_posterior(self, z): return Normal(torch.zeros_like(self.sigma), self.sigma).log_prob(z ).sum(-1) def log_prior(self): return Normal(torch.zeros_like(self.sigma), torch.ones_like(self.sigma) ).log_prob(self.z).sum(-1) def nsample(self, n=1): if self.return_means: return torch.zeros_like(self.sigma).expand(n, -1, -1) eps = Normal(torch.zeros_like(self.sigma), torch.ones_like(self.sigma) ).sample((n,)) return eps * self.sigma def deterministic(self): """Set to return means.""" self.return_means = True def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'z_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn as nn from torch.distributions import Normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_expand_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_expand_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mul_softplus_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 * tmp5 tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf2 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_expand_0[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_expand_1[grid(256)](buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = torch.ops.aten.normal.Tensor_Tensor(buf2, buf3) buf5 = buf4 del buf4 buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf6 = buf2 del buf2 triton_poi_fused_mul_softplus_2[grid(256)](buf0, buf5, buf1, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf5 class N0reparameterizeNew(nn.Module): """Reparametrize zero mean Gaussian Variable.""" def __init__(self, input_dim, z_dim, fixed_sigma=None): super().__init__() self.input_dim = input_dim self.z_dim = z_dim self.sigma_linear = nn.Linear(input_dim, z_dim) self.return_means = False if fixed_sigma is not None: self.register_buffer('fixed_sigma', torch.tensor(fixed_sigma)) else: self.fixed_sigma = None self.sigma = None self.z = None def kl(self): return -0.5 * torch.sum(1 + 2 * self.sigma.log() - self.sigma ** 2, -1) def log_posterior(self): return self._log_posterior(self.z) def _log_posterior(self, z): return Normal(torch.zeros_like(self.sigma), self.sigma).log_prob(z ).sum(-1) def 
log_prior(self): return Normal(torch.zeros_like(self.sigma), torch.ones_like(self.sigma) ).log_prob(self.z).sum(-1) def nsample(self, n=1): if self.return_means: return torch.zeros_like(self.sigma).expand(n, -1, -1) eps = Normal(torch.zeros_like(self.sigma), torch.ones_like(self.sigma) ).sample((n,)) return eps * self.sigma def deterministic(self): """Set to return means.""" self.return_means = True def forward(self, input_0): primals_1 = self.sigma_linear.weight primals_2 = self.sigma_linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pimdh/lie-vae
N0reparameterize
false
16,251
[ "MIT" ]
83
0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
https://github.com/pimdh/lie-vae/tree/0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
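An illustrative call of the N0reparameterize module above, assuming plain PyTorch; the (4, 4) input and n=2 samples are hypothetical choices:

import torch

layer = N0reparameterize(input_dim=4, z_dim=4)
x = torch.rand(4, 4)
z = layer(x, n=2)             # n reparameterised samples per batch element
print(z.shape)                # torch.Size([2, 4, 4])
print(layer.kl().shape)       # torch.Size([4]): KL to the standard-normal prior, per batch element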
ChamferLoss
import torch import torch.nn as nn class ChamferLoss(nn.Module): def __init__(self): super().__init__() def forward(self, x, y): """ :param x: (bs, np, 3) :param y: (bs, np, 3) :return: loss """ x = x.unsqueeze(1) y = y.unsqueeze(2) dist = torch.sqrt(1e-06 + torch.sum(torch.pow(x - y, 2), 3)) min1, _ = torch.min(dist, 1) min2, _ = torch.min(dist, 2) return min1.mean() + min2.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x4 = xindex // 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0 + 16 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (8 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x1 + 64 * x3), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (12 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 1e-06 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tl.store(out_ptr0 + x5, tmp21, xmask) @triton.jit def triton_per_fused_add_mean_min_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex % 4 r3 = rindex // 4 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr0 + (r2 + 16 * r3), None) tmp11 = tl.load(in_ptr0 + (4 + r2 + 16 * r3), None) tmp13 = tl.load(in_ptr0 + (8 + r2 + 16 * r3), None) tmp15 = tl.load(in_ptr0 + (12 + r2 + 16 * r3), None) tmp2 = triton_helpers.minimum(tmp0, tmp1) tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp12 = triton_helpers.minimum(tmp10, tmp11) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp16 = triton_helpers.minimum(tmp14, tmp15) tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = 64.0 tmp21 = tmp9 / tmp20 tmp22 = tmp19 / tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_pow_sqrt_sub_sum_0[grid(256)](arg0_1, 
arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 triton_per_fused_add_mean_min_1[grid(1)](buf3, buf0, 1, 64, XBLOCK= 1, num_warps=2, num_stages=1) del buf0 return buf3, class ChamferLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
phuochieu212/PointGLR
ChamferLoss
false
16,252
[ "MIT" ]
104
37017b1af31486aa9d516a3762725a650dca9ad1
https://github.com/phuochieu212/PointGLR/tree/37017b1af31486aa9d516a3762725a650dca9ad1
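A usage sketch for this second ChamferLoss variant, assuming plain PyTorch; the (2, 128, 3) shapes follow the (bs, np, 3) layout in its docstring rather than the 4-D tensors from get_inputs():

import torch

loss_fn = ChamferLoss()
x = torch.rand(2, 128, 3)
y = torch.rand(2, 128, 3)
loss = loss_fn(x, y)          # mean nearest-neighbour distance in both directions (true distances, not squared)
print(loss.item())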
QuaternionMean
import torch from torch import nn as nn def quaternions_to_group_matrix(q): """Normalises q and maps to group matrix.""" q = q / q.norm(p=2, dim=-1, keepdim=True) r, i, j, k = q[..., 0], q[..., 1], q[..., 2], q[..., 3] return torch.stack([r * r - i * i - j * j + k * k, 2 * (r * i + j * k), 2 * (r * j - i * k), 2 * (r * i - j * k), -r * r + i * i - j * j + k * k, 2 * (i * j + r * k), 2 * (r * j + i * k), 2 * (i * j - r * k ), -r * r - i * i + j * j + k * k], -1).view(*q.shape[:-1], 3, 3) class QuaternionMean(nn.Module): def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 4) def forward(self, x): return quaternions_to_group_matrix(self.map(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dims': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_stack_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 - tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 - tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = tmp0 * tmp2 tmp12 = tmp5 * tmp8 tmp13 = tmp11 + tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = tmp0 * tmp5 tmp17 = tmp2 * tmp8 tmp18 = tmp16 - tmp17 tmp19 = tmp18 * tmp14 tmp20 = tmp11 - tmp12 tmp21 = tmp20 * tmp14 tmp22 = -tmp0 tmp23 = tmp22 * tmp0 tmp24 = tmp23 + tmp3 tmp25 = tmp24 - tmp6 tmp26 = tmp25 + tmp9 tmp27 = tmp2 * tmp5 tmp28 = tmp0 * tmp8 tmp29 = tmp27 + tmp28 tmp30 = tmp29 * tmp14 tmp31 = tmp16 + tmp17 tmp32 = tmp31 * tmp14 tmp33 = tmp27 - tmp28 tmp34 = tmp33 * tmp14 tmp35 = tmp23 - tmp3 tmp36 = tmp35 + tmp6 tmp37 = tmp36 + tmp9 tl.store(out_ptr0 + 9 * x0, tmp10, xmask) tl.store(out_ptr1 + 9 * x0, tmp15, xmask) tl.store(out_ptr2 + 9 * x0, tmp19, xmask) tl.store(out_ptr3 + 9 * x0, tmp21, xmask) tl.store(out_ptr4 + 9 * x0, tmp26, xmask) tl.store(out_ptr5 + 9 * x0, tmp30, xmask) tl.store(out_ptr6 + 9 * x0, tmp32, xmask) tl.store(out_ptr7 + 9 * x0, tmp34, xmask) tl.store(out_ptr8 + 9 * x0, tmp37, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 
4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 9), (144, 36, 9, 1), torch.float32 ) buf2 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 0) buf3 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 1) buf4 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 2) buf5 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 3) buf6 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 4) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 5) buf8 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 6) buf9 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 7) buf10 = reinterpret_tensor(buf11, (4, 4, 4, 1), (144, 36, 9, 1), 8) triton_poi_fused_stack_1[grid(64)](buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return reinterpret_tensor(buf11, (4, 4, 4, 3, 3), (144, 36, 9, 3, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def quaternions_to_group_matrix(q): """Normalises q and maps to group matrix.""" q = q / q.norm(p=2, dim=-1, keepdim=True) r, i, j, k = q[..., 0], q[..., 1], q[..., 2], q[..., 3] return torch.stack([r * r - i * i - j * j + k * k, 2 * (r * i + j * k), 2 * (r * j - i * k), 2 * (r * i - j * k), -r * r + i * i - j * j + k * k, 2 * (i * j + r * k), 2 * (r * j + i * k), 2 * (i * j - r * k ), -r * r - i * i + j * j + k * k], -1).view(*q.shape[:-1], 3, 3) class QuaternionMeanNew(nn.Module): def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 4) def forward(self, input_0): primals_1 = self.map.weight primals_2 = self.map.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pimdh/lie-vae
QuaternionMean
false
16,253
[ "MIT" ]
83
0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
https://github.com/pimdh/lie-vae/tree/0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
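A quick sanity check, not part of the repository (shapes and tolerances are invented), for the quaternions_to_group_matrix helper defined alongside the compiled module above: a normalised quaternion should map to an orthogonal matrix with determinant +1.

import torch

# quaternions_to_group_matrix is the pure-PyTorch helper from the entry above.
q = torch.randn(5, 4)                                  # random, unnormalised quaternions
R = quaternions_to_group_matrix(q)                     # shape (5, 3, 3)

identity = torch.eye(3).expand(5, 3, 3)
print(torch.allclose(R @ R.transpose(-1, -2), identity, atol=1e-5))  # True: each R is orthogonal
print(torch.det(R))                                    # each determinant close to 1.0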
RelPositionMultiHeadedAttention
import math import torch from typing import Optional from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'=torch.empty(0)) ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). 1.When applying cross attention between decoder and encoder, the batch padding mask for input is in (#batch, 1, T) shape. 2.When applying self attention of encoder, the mask is in (#batch, T, T) shape. 3.When applying self attention of decoder, the mask is in (#batch, L, L) shape. 4.If the different position in decoder see different block of the encoder, such as Mocha, the passed in mask could be in (#batch, L, T) shape. But there is no such case in current Wenet. Returns: torch.Tensor: Output tensor (#batch, time1, d_model). 
""" q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class RelPositionMultiHeadedAttention(MultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head, n_feat, dropout_rate): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_head, n_feat, dropout_rate) self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x, zero_triu: 'bool'=False): """Compute relative positinal encoding. Args: x (torch.Tensor): Input tensor (batch, time, size). zero_triu (bool): If true, return the lower triangular part of the matrix. Returns: torch.Tensor: Output tensor. """ zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x .size(2)) x = x_padded[:, :, 1:].view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor' ): """Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from typing import Optional from typing import Tuple from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float('-inf') tmp7 = tl.where(tmp0, tmp6, tmp5) tmp11 = tmp9 + tmp10 tmp12 = tmp11 * tmp4 tmp13 = tl.where(tmp8, tmp6, tmp12) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp16 + tmp17 tmp19 = tmp18 * tmp4 tmp20 = tl.where(tmp15, tmp6, tmp19) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp23 + tmp24 tmp26 = tmp25 * tmp4 tmp27 = tl.where(tmp22, tmp6, tmp26) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x3, tmp28, xmask) tl.store(out_ptr1 + x3, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp2 = tl.load(in_ptr1 + x5, xmask) tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last') tmp3 
= tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float('-inf') tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp13 = 0.0 tmp14 = tl.where(tmp0, tmp13, tmp12) tl.store(in_out_ptr0 + x5, tmp12, xmask) tl.store(out_ptr0 + x5, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3) del primals_11 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_12 del primals_13 del primals_3 buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_clone_2[grid(16, 4)](buf3, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_eq_3[grid(64)](primals_14, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf3 buf12 = empty_strided_cuda((4, 4, 4, 
1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_4[grid(64)](buf10, buf6, buf9, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_5[grid(256)](buf13, buf10, buf9, buf11, buf12, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf12 triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf15, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16) buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf11 triton_poi_fused_clone_2[grid(16, 4)](buf16, buf17, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0) del buf16 extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18) del primals_16 return reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0 ), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). 
""" n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]', pos_emb: 'torch.Tensor'=torch.empty(0)) ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). 1.When applying cross attention between decoder and encoder, the batch padding mask for input is in (#batch, 1, T) shape. 2.When applying self attention of encoder, the mask is in (#batch, T, T) shape. 3.When applying self attention of decoder, the mask is in (#batch, L, L) shape. 4.If the different position in decoder see different block of the encoder, such as Mocha, the passed in mask could be in (#batch, L, T) shape. But there is no such case in current Wenet. Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class RelPositionMultiHeadedAttentionNew(MultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head, n_feat, dropout_rate): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_head, n_feat, dropout_rate) self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x, zero_triu: 'bool'=False): """Compute relative positinal encoding. Args: x (torch.Tensor): Input tensor (batch, time, size). zero_triu (bool): If true, return the lower triangular part of the matrix. Returns: torch.Tensor: Output tensor. 
""" zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x .size(2)) x = x_padded[:, :, 1:].view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, input_0, input_1, input_2, input_3, input_4): primals_12 = self.pos_bias_u primals_13 = self.pos_bias_v primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_11 = self.linear_out.weight primals_16 = self.linear_out.bias primals_15 = self.linear_pos.weight primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 primals_14 = input_4 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
pengchengguo/wenet
RelPositionMultiHeadedAttention
false
16,254
[ "Apache-2.0" ]
1,166
940dc164e5cfa9b8c0131688f0f9457af9563892
https://github.com/pengchengguo/wenet/tree/940dc164e5cfa9b8c0131688f0f9457af9563892
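For orientation, a minimal forward-pass sketch (batch size, sequence length and feature size are invented) of the eager RelPositionMultiHeadedAttention above; the compiled ...New variant is specialised to a CUDA device and to the 4x4x4 example inputs used during tracing.

import torch

# RelPositionMultiHeadedAttention is the eager class from the entry above.
attn = RelPositionMultiHeadedAttention(n_head=4, n_feat=8, dropout_rate=0.0)
attn.eval()

x = torch.rand(2, 10, 8)          # (batch, time, n_feat), reused as query/key/value
pos_emb = torch.rand(2, 10, 8)    # relative positional embeddings with the same feature size
mask = torch.ones(2, 1, 10)       # 1 = attend, 0 = masked out

out = attn(x, x, x, mask, pos_emb)
print(out.shape)                  # torch.Size([2, 10, 8])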
ClampExp
import torch import torch.utils.data class ClampExp(torch.nn.Module): """ Nonlinearity min(exp(lam * x), 1) """ def __init__(self): """ Constructor :param lam: Lambda parameter """ super(ClampExp, self).__init__() def forward(self, x): one = torch.tensor(1.0, device=x.device, dtype=x.dtype) return torch.min(torch.exp(x), one) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_lift_fresh_minimum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = 1.0 tmp3 = triton_helpers.minimum(tmp1, tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_lift_fresh_minimum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ClampExpNew(torch.nn.Module): """ Nonlinearity min(exp(lam * x), 1) """ def __init__(self): """ Constructor :param lam: Lambda parameter """ super(ClampExpNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pkulwj1994/normalizing-flows
ClampExp
false
16,255
[ "MIT" ]
96
326321c4aea4a3f6ab703f82e21277a79cd7d9e4
https://github.com/pkulwj1994/normalizing-flows/tree/326321c4aea4a3f6ab703f82e21277a79cd7d9e4
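A small illustrative check (values invented) of the clamped exponential above: inputs are mapped through exp and capped at 1, so non-negative inputs saturate at exactly 1.

import torch

# ClampExp is the eager module from the entry above.
clamp_exp = ClampExp()
x = torch.tensor([-2.0, -1.0, 0.0, 1.0, 2.0])
print(clamp_exp(x))                                  # [exp(-2), exp(-1), 1, 1, 1]
print(torch.min(torch.exp(x), torch.tensor(1.0)))    # identical values, by definition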
TVLoss
import torch from typing import Tuple from torch.nn.modules.loss import _Loss from typing import List from typing import Optional def _reduce(x: 'torch.Tensor', reduction: 'str'='mean') ->torch.Tensor: """Reduce input in batch dimension if needed. Args: x: Tensor with shape (N, *). reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` """ if reduction == 'none': return x elif reduction == 'mean': return x.mean(dim=0) elif reduction == 'sum': return x.sum(dim=0) else: raise ValueError( "Uknown reduction. Expected one of {'none', 'mean', 'sum'}") def _validate_input(tensors: 'List[torch.Tensor]', dim_range: 'Tuple[int, int]'=(0, -1), data_range: 'Tuple[float, float]'=(0.0, -1.0 ), size_range: 'Optional[Tuple[int, int]]'=None) ->None: """Check that input(-s) satisfies the requirements Args: tensors: Tensors to check dim_range: Allowed number of dimensions. (min, max) data_range: Allowed range of values in tensors. (min, max) size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1) """ if not __debug__: return x = tensors[0] for t in tensors: assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}' assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}' if size_range is None: assert t.size() == x.size( ), f'Expected tensors with same size, got {t.size()} and {x.size()}' else: assert t.size()[size_range[0]:size_range[1]] == x.size()[size_range [0]:size_range[1] ], f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}' if dim_range[0] == dim_range[1]: assert t.dim() == dim_range[0 ], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}' elif dim_range[0] < dim_range[1]: assert dim_range[0] <= t.dim() <= dim_range[1 ], f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}' if data_range[0] < data_range[1]: assert data_range[0] <= t.min( ), f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}' assert t.max() <= data_range[1 ], f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}' def total_variation(x: 'torch.Tensor', reduction: 'str'='mean', norm_type: 'str'='l2') ->torch.Tensor: """Compute Total Variation metric Args: x: Tensor. Shape :math:`(N, C, H, W)`. reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` norm_type: ``'l1'`` | ``'l2'`` | ``'l2_squared'``, defines which type of norm to implement, isotropic or anisotropic. 
Returns: Total variation of a given tensor References: https://www.wikiwand.com/en/Total_variation_denoising https://remi.flamary.com/demos/proxtv.html """ _validate_input([x], dim_range=(4, 4), data_range=(0, -1)) if norm_type == 'l1': w_variance = torch.sum(torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1]), dim=[1, 2, 3]) h_variance = torch.sum(torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :]), dim=[1, 2, 3]) score = h_variance + w_variance elif norm_type == 'l2': w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3]) h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3]) score = torch.sqrt(h_variance + w_variance) elif norm_type == 'l2_squared': w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3]) h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3]) score = h_variance + w_variance else: raise ValueError( "Incorrect norm type, should be one of {'l1', 'l2', 'l2_squared'}") return _reduce(score, reduction) class TVLoss(_Loss): """Creates a criterion that measures the total variation of the the given input :math:`x`. If :attr:`norm_type` set to ``'l2'`` the loss can be described as: .. math:: TV(x) = \\sum_{N}\\sqrt{\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}|^2 + |x_{:, :, i, j+1} - x_{:, :, i, j}|^2)} Else if :attr:`norm_type` set to ``'l1'``: .. math:: TV(x) = \\sum_{N}\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}| + |x_{:, :, i, j+1} - x_{:, :, i, j}|) where :math:`N` is the batch size, `C` is the channel size. Args: norm_type: one of ``'l1'`` | ``'l2'`` | ``'l2_squared'`` reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` Examples: >>> loss = TVLoss() >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) >>> output = loss(x) >>> output.backward() References: https://www.wikiwand.com/en/Total_variation_denoising https://remi.flamary.com/demos/proxtv.html """ def __init__(self, norm_type: 'str'='l2', reduction: 'str'='mean'): super().__init__() self.norm_type = norm_type self.reduction = reduction def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Computation of Total Variation (TV) index as a loss function. Args: x: An input tensor. Shape :math:`(N, C, H, W)`. Returns: Value of TV loss to be minimized. """ score = total_variation(x, reduction=self.reduction, norm_type=self .norm_type) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from typing import Tuple from torch.nn.modules.loss import _Loss from typing import List from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex % 12 r2 = rindex // 12 x0 = xindex tmp0 = tl.load(in_ptr0 + (4 + r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + 16 * r2 + 64 * x0), rmask & xmask, other=0.0 ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_pow_sub_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 48 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex % 3 r2 = rindex // 3 x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + r1 + 4 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r1 + 4 * r2 + 64 * x0), rmask & xmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_per_fused_add_mean_sqrt_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp3 = libdevice.sqrt(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_pow_sub_sum_0[grid(4)](arg0_1, buf0, 4, 48, XBLOCK =1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_pow_sub_sum_1[grid(4)](arg0_1, buf1, 4, 48, XBLOCK =1, num_warps=2, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused_add_mean_sqrt_2[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, def _reduce(x: 'torch.Tensor', reduction: 'str'='mean') ->torch.Tensor: """Reduce input in batch dimension if needed. Args: x: Tensor with shape (N, *). reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. 
Default: ``'mean'`` """ if reduction == 'none': return x elif reduction == 'mean': return x.mean(dim=0) elif reduction == 'sum': return x.sum(dim=0) else: raise ValueError( "Uknown reduction. Expected one of {'none', 'mean', 'sum'}") def _validate_input(tensors: 'List[torch.Tensor]', dim_range: 'Tuple[int, int]'=(0, -1), data_range: 'Tuple[float, float]'=(0.0, -1.0 ), size_range: 'Optional[Tuple[int, int]]'=None) ->None: """Check that input(-s) satisfies the requirements Args: tensors: Tensors to check dim_range: Allowed number of dimensions. (min, max) data_range: Allowed range of values in tensors. (min, max) size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1) """ if not __debug__: return x = tensors[0] for t in tensors: assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}' assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}' if size_range is None: assert t.size() == x.size( ), f'Expected tensors with same size, got {t.size()} and {x.size()}' else: assert t.size()[size_range[0]:size_range[1]] == x.size()[size_range [0]:size_range[1] ], f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}' if dim_range[0] == dim_range[1]: assert t.dim() == dim_range[0 ], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}' elif dim_range[0] < dim_range[1]: assert dim_range[0] <= t.dim() <= dim_range[1 ], f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}' if data_range[0] < data_range[1]: assert data_range[0] <= t.min( ), f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}' assert t.max() <= data_range[1 ], f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}' def total_variation(x: 'torch.Tensor', reduction: 'str'='mean', norm_type: 'str'='l2') ->torch.Tensor: """Compute Total Variation metric Args: x: Tensor. Shape :math:`(N, C, H, W)`. reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` norm_type: ``'l1'`` | ``'l2'`` | ``'l2_squared'``, defines which type of norm to implement, isotropic or anisotropic. Returns: Total variation of a given tensor References: https://www.wikiwand.com/en/Total_variation_denoising https://remi.flamary.com/demos/proxtv.html """ _validate_input([x], dim_range=(4, 4), data_range=(0, -1)) if norm_type == 'l1': w_variance = torch.sum(torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1]), dim=[1, 2, 3]) h_variance = torch.sum(torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :]), dim=[1, 2, 3]) score = h_variance + w_variance elif norm_type == 'l2': w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3]) h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3]) score = torch.sqrt(h_variance + w_variance) elif norm_type == 'l2_squared': w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3]) h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3]) score = h_variance + w_variance else: raise ValueError( "Incorrect norm type, should be one of {'l1', 'l2', 'l2_squared'}") return _reduce(score, reduction) class TVLossNew(_Loss): """Creates a criterion that measures the total variation of the the given input :math:`x`. If :attr:`norm_type` set to ``'l2'`` the loss can be described as: .. 
math:: TV(x) = \\sum_{N}\\sqrt{\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}|^2 + |x_{:, :, i, j+1} - x_{:, :, i, j}|^2)} Else if :attr:`norm_type` set to ``'l1'``: .. math:: TV(x) = \\sum_{N}\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}| + |x_{:, :, i, j+1} - x_{:, :, i, j}|) where :math:`N` is the batch size, `C` is the channel size. Args: norm_type: one of ``'l1'`` | ``'l2'`` | ``'l2_squared'`` reduction: Specifies the reduction type: ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` Examples: >>> loss = TVLoss() >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) >>> output = loss(x) >>> output.backward() References: https://www.wikiwand.com/en/Total_variation_denoising https://remi.flamary.com/demos/proxtv.html """ def __init__(self, norm_type: 'str'='l2', reduction: 'str'='mean'): super().__init__() self.norm_type = norm_type self.reduction = reduction def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
photosynthesis-team/piq
TVLoss
false
16,256
[ "Apache-2.0" ]
471
79cccf887dd28ce57dea461972cda3648a79165a
https://github.com/photosynthesis-team/piq/tree/79cccf887dd28ce57dea461972cda3648a79165a
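A short usage sketch for the eager TVLoss above, mirroring its docstring example (input sizes are invented; the compiled version is specialised to CUDA and 4x4x4x4 inputs). A constant image has zero total variation.

import torch

# TVLoss is the eager criterion from the entry above.
tv = TVLoss(norm_type='l2', reduction='mean')

x = torch.rand(2, 3, 16, 16, requires_grad=True)   # (N, C, H, W)
loss = tv(x)
loss.backward()
print(loss.item(), x.grad.shape)

print(tv(torch.ones(1, 1, 8, 8)))                  # tensor(0.) for a flat image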
Nreparameterize
import torch from torch import nn as nn import torch.nn.functional as F from torch.distributions import Normal class Nreparameterize(nn.Module): """Reparametrize Gaussian variable.""" def __init__(self, input_dim, z_dim): super().__init__() self.input_dim = input_dim self.z_dim = z_dim self.sigma_linear = nn.Linear(input_dim, z_dim) self.mu_linear = nn.Linear(input_dim, z_dim) self.return_means = False self.mu, self.sigma, self.z = None, None, None def forward(self, x, n=1): self.mu = self.mu_linear(x) self.sigma = F.softplus(self.sigma_linear(x)) self.z = self.nsample(n=n) return self.z def kl(self): return -0.5 * torch.sum(1 + 2 * self.sigma.log() - self.mu.pow(2) - self.sigma ** 2, -1) def log_posterior(self): return self._log_posterior(self.z) def _log_posterior(self, z): return Normal(self.mu, self.sigma).log_prob(z).sum(-1) def log_prior(self): return Normal(torch.zeros_like(self.mu), torch.ones_like(self.sigma) ).log_prob(self.z).sum(-1) def nsample(self, n=1): if self.return_means: return self.mu.expand(n, -1, -1) eps = Normal(torch.zeros_like(self.mu), torch.ones_like(self.mu) ).sample((n,)) return self.mu + eps * self.sigma def deterministic(self): """Set to return means.""" self.return_means = True def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'z_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn as nn from torch.distributions import Normal assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_expand_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_expand_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_softplus_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp7 = tl.load(in_ptr2 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp8 = tmp7 * tmp5 tmp9 = tmp6 + tmp8 tl.store(out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr1 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_expand_0[grid(256)](buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_expand_1[grid(256)](buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = torch.ops.aten.normal.Tensor_Tensor(buf3, buf4) buf6 = buf5 del buf5 buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf7 = buf3 del buf3 triton_poi_fused_add_mul_softplus_2[grid(256)](buf1, buf0, buf6, buf2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, buf2, reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf6 class NreparameterizeNew(nn.Module): """Reparametrize Gaussian variable.""" def __init__(self, input_dim, z_dim): super().__init__() self.input_dim = input_dim self.z_dim = z_dim self.sigma_linear = 
nn.Linear(input_dim, z_dim) self.mu_linear = nn.Linear(input_dim, z_dim) self.return_means = False self.mu, self.sigma, self.z = None, None, None def kl(self): return -0.5 * torch.sum(1 + 2 * self.sigma.log() - self.mu.pow(2) - self.sigma ** 2, -1) def log_posterior(self): return self._log_posterior(self.z) def _log_posterior(self, z): return Normal(self.mu, self.sigma).log_prob(z).sum(-1) def log_prior(self): return Normal(torch.zeros_like(self.mu), torch.ones_like(self.sigma) ).log_prob(self.z).sum(-1) def nsample(self, n=1): if self.return_means: return self.mu.expand(n, -1, -1) eps = Normal(torch.zeros_like(self.mu), torch.ones_like(self.mu) ).sample((n,)) return self.mu + eps * self.sigma def deterministic(self): """Set to return means.""" self.return_means = True def forward(self, input_0): primals_1 = self.sigma_linear.weight primals_2 = self.sigma_linear.bias primals_4 = self.mu_linear.weight primals_5 = self.mu_linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
pimdh/lie-vae
Nreparameterize
false
16,257
[ "MIT" ]
83
0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
https://github.com/pimdh/lie-vae/tree/0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
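A minimal sketch (feature and batch sizes invented) of how the eager Nreparameterize layer above is typically driven in a VAE: it draws n reparameterised samples and exposes the KL term against a standard normal prior.

import torch

# Nreparameterize is the eager layer from the entry above.
layer = Nreparameterize(input_dim=16, z_dim=4)
h = torch.rand(8, 16)        # e.g. encoder features for a batch of 8

z = layer(h, n=3)            # 3 samples per element: mu + eps * sigma
print(z.shape)               # torch.Size([3, 8, 4])
print(layer.kl().shape)      # torch.Size([8]): one KL value per batch element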
MAPELoss
import torch import torch.nn as nn class MAPELoss(nn.Module): def forward(self, estimation: 'torch.Tensor', target: 'torch.Tensor'): AER = torch.abs((target - estimation) / (target + 1e-10)) MAPE = AER.mean() * 100 return MAPE def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 1e-10 tmp4 = tmp0 + tmp3 tmp5 = tmp2 / tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = 100.0 tmp13 = tmp11 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MAPELossNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pmq20/gde
MAPELoss
false
16,258
[ "MIT" ]
131
fa4d4dacbcf00727bef76c4a641c72b94d5f8126
https://github.com/pmq20/gde/tree/fa4d4dacbcf00727bef76c4a641c72b94d5f8126
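A tiny worked example (numbers invented) of the MAPE computation above: each absolute error is taken relative to its target, averaged, and scaled to a percentage.

import torch

# MAPELoss is the eager criterion from the entry above.
mape = MAPELoss()
target = torch.tensor([100.0, 200.0])
estimate = torch.tensor([110.0, 180.0])

# |100-110|/100 = 0.10 and |200-180|/200 = 0.10, so the mean is 0.10, i.e. 10%.
print(mape(estimate, target))   # tensor(10.0000)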
ContrastiveEmbeddingLoss
import torch from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * class ContrastiveEmbeddingLoss(nn.Module): """ Contrastive embedding loss paper: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, embeddings_left, embeddings_right, distance_true): """ Forward propagation method for the contrastive loss. Args: embeddings_left: left objects embeddings embeddings_right: right objects embeddings distance_true: true distances Returns: loss """ diff = embeddings_left - embeddings_right distance_pred = torch.sqrt(torch.sum(torch.pow(diff, 2), 1)) bs = len(distance_true) margin_distance = self.margin - distance_pred margin_distance_ = torch.clamp(margin_distance, min=0.0) loss = (1 - distance_true) * torch.pow(distance_pred, 2 ) + distance_true * torch.pow(margin_distance_, 2) if self.reduction == 'mean': loss = torch.sum(loss) / 2.0 / bs elif self.reduction == 'sum': loss = torch.sum(loss) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = tmp1 - tmp3 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp0 * tmp9 tmp11 = tmp5 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_sqrt_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_div_mul_pow_rsub_sum_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveEmbeddingLossNew(nn.Module): """ Contrastive embedding loss paper: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. 
""" super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
pokidyshev/catalyst
ContrastiveEmbeddingLoss
false
16,259
[ "Apache-2.0" ]
46
bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
https://github.com/pokidyshev/catalyst/tree/bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
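For reference, a short sketch (embedding size, batch size and labels invented) of the contrastive embedding loss above; following the quoted formulation, distance_true is 0 for similar pairs and 1 for dissimilar pairs.

import torch

# ContrastiveEmbeddingLoss is the eager criterion from the entry above.
criterion = ContrastiveEmbeddingLoss(margin=1.0, reduction='mean')

left = torch.rand(8, 32)                        # embeddings of the left items
right = torch.rand(8, 32)                       # embeddings of the right items
labels = torch.randint(0, 2, (8,)).float()      # 0 = similar pair, 1 = dissimilar pair

loss = criterion(left, right, labels)
print(loss)   # scalar loss: similar pairs are pulled together, dissimilar pairs pushed past the margin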
Upsample
import torch import torch.nn as nn class Upsample(nn.Module): """PyTorch upsampling implementation. This module upsamples by inserting <i-1> zeros in between samples in the time dimension. It does not low pass filter after this and assumes that the filter is a separate module if desired. .. seealso:: rfml.ptradio.RRC Args: i (int, optional): Interpolation factor -- only integer valued sample rate conversions are allowed. Defaults to 8. Raises: ValueError: If i is less than 2. This module assumes that the input is formatted as BxCxIQxT. The time dimension is extended by a factor of *i*, as provided, and all other dimensions are untouched. """ def __init__(self, i: 'int'=8): if i < 2: raise ValueError( 'You must interpolate by at least a factor of 2, not {}'. format(i)) super(Upsample, self).__init__() self.i = i def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = x.unsqueeze(-1) mask = x.data.new(x.shape[0], x.shape[1], x.shape[2], x.shape[3], self.i).zero_() mask[:, :, :, :, 0] = 1.0 x = mask * x x = x.reshape(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x. shape[4]) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_fill_lift_fresh_mul_zero_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp6 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x2, tmp7, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 8), (512, 128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_fill_lift_fresh_mul_zero_0[grid(2048)](arg0_1, buf0, 2048, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0), class UpsampleNew(nn.Module): """PyTorch upsampling implementation. This module upsamples by inserting <i-1> zeros in between samples in the time dimension. It does not low pass filter after this and assumes that the filter is a separate module if desired. .. seealso:: rfml.ptradio.RRC Args: i (int, optional): Interpolation factor -- only integer valued sample rate conversions are allowed. Defaults to 8. Raises: ValueError: If i is less than 2. This module assumes that the input is formatted as BxCxIQxT. The time dimension is extended by a factor of *i*, as provided, and all other dimensions are untouched. """ def __init__(self, i: 'int'=8): if i < 2: raise ValueError( 'You must interpolate by at least a factor of 2, not {}'. format(i)) super(UpsampleNew, self).__init__() self.i = i def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
plexixx/rfml
Upsample
false
16,260
[ "BSD-3-Clause" ]
61
c00633b2c2005d38f991c6b9e3fd855ca25166c4
https://github.com/plexixx/rfml/tree/c00633b2c2005d38f991c6b9e3fd855ca25166c4
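A small illustration (input values invented) of the zero-insertion behaviour of Upsample above: each sample along the time axis is followed by i - 1 zeros, with no filtering applied.

import torch

# Upsample is the eager module from the entry above.
up = Upsample(i=4)
x = torch.arange(1.0, 4.0).reshape(1, 1, 1, 3)   # B x C x IQ x T with T = 3
print(up(x))
# tensor([[[[1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]]]])
print(up(x).shape)                               # time dimension grows from 3 to 12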
ChebConv
import math import torch def cheb_conv(laplacian, inputs, weight): """Chebyshev convolution. Args: laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding to the current sampling of the sphere. inputs (:obj:`torch.Tensor`): The current input data being forwarded. weight (:obj:`torch.Tensor`): The weights of the current layer. Returns: :obj:`torch.Tensor`: Inputs after applying Chebyshev convolution. """ B, V, Fin = inputs.shape K, Fin, Fout = weight.shape x0 = inputs.permute(1, 2, 0).contiguous() x0 = x0.view([V, Fin * B]) inputs = x0.unsqueeze(0) if K > 0: x1 = torch.sparse.mm(laplacian, x0) inputs = torch.cat((inputs, x1.unsqueeze(0)), 0) for _ in range(1, K - 1): x2 = 2 * torch.sparse.mm(laplacian, x1) - x0 inputs = torch.cat((inputs, x2.unsqueeze(0)), 0) x0, x1 = x1, x2 inputs = inputs.view([K, V, Fin, B]) inputs = inputs.permute(3, 1, 2, 0).contiguous() inputs = inputs.view([B * V, Fin * K]) weight = weight.view(Fin * K, Fout) inputs = inputs.matmul(weight) inputs = inputs.view([B, V, Fout]) return inputs class ChebConv(torch.nn.Module): """Graph convolutional layer. """ def __init__(self, in_channels, out_channels, kernel_size, bias=True, conv=cheb_conv): """Initialize the Chebyshev layer. Args: in_channels (int): Number of channels/features in the input graph. out_channels (int): Number of channels/features in the output graph. kernel_size (int): Number of trainable parameters per filter, which is also the size of the convolutional kernel. The order of the Chebyshev polynomials is kernel_size - 1. bias (bool): Whether to add a bias term. conv (callable): Function which will perform the actual convolution. """ super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self._conv = conv shape = kernel_size, in_channels, out_channels self.weight = torch.nn.Parameter(torch.Tensor(*shape)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.kaiming_initialization() def kaiming_initialization(self): """Initialize weights and bias. """ std = math.sqrt(2 / (self.in_channels * self.kernel_size)) self.weight.data.normal_(0, std) if self.bias is not None: self.bias.data.fill_(0.01) def forward(self, laplacian, inputs): """Forward graph convolution. Args: laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding to the current sampling of the sphere. inputs (:obj:`torch.Tensor`): The current input data being forwarded. Returns: :obj:`torch.Tensor`: The convoluted inputs. """ outputs = self._conv(laplacian, inputs, self.weight) if self.bias is not None: outputs += self.bias return outputs def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (x1 + 16 * y0), xmask & ymask, eviction_policy ='evict_last') tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tl.store(out_ptr0 + (x1 + 16 * y0), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y1 = yindex // 16 x2 = xindex y0 = yindex % 16 tmp0 = y1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.broadcast_to(y1, [XBLOCK, YBLOCK]) tmp7 = tl.full([1, 1], 2, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp8 & tmp4 tmp10 = tl.full([1, 1], 1, tl.int64) tmp11 = tmp5 < tmp10 tmp12 = tmp11 & tmp9 tmp13 = tl.load(in_ptr0 + (y0 + 16 * x2), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp14 = tmp5 >= tmp10 tmp15 = tmp14 & tmp9 tmp16 = tl.load(in_ptr1 + (x2 + 4 * y0), tmp15 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp17 = tl.where(tmp11, tmp13, tmp16) tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp5 >= tmp7 tmp22 = tmp20 & tmp4 tmp23 = tl.load(in_ptr2 + (x2 + 4 * y0), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tl.load(in_ptr0 + (y0 + 16 * x2), tmp22 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp27 = tmp25 - tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp22, tmp27, tmp28) tmp30 = tl.where(tmp8, tmp19, tmp29) tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype) tmp32 = tl.where(tmp4, tmp30, tmp31) tmp33 = tmp0 >= tmp3 
tl.full([1, 1], 4, tl.int64) tmp36 = tl.load(in_ptr3 + (x2 + 4 * y0), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp37 = tmp36 * tmp24 tmp38 = tl.load(in_ptr1 + (x2 + 4 * y0), tmp33 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp39 = tmp37 - tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp33, tmp39, tmp40) tmp42 = tl.where(tmp4, tmp32, tmp41) tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_clone_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x0 // 4) + 16 * (x1 % 4) + 64 * (x0 % 4) + x1 // 4), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(64)](buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](primals_3, buf1, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf1, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) buf3 = buf2 del buf2 buf4 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), buf3, reinterpret_tensor(primals_2, (4, 4 ), (1, 4), 0), beta=0) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_mul_sub_2[grid(4, 16)](buf5, buf1, buf6, 4, 16, XBLOCK=8, YBLOCK=4, num_warps=1, num_stages=1) buf7 = torch.ops.aten._sparse_addmm.default(reinterpret_tensor(buf0, (16, 4), (1, 16), 0), reinterpret_tensor(buf6, (16, 4), (1, 16), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), beta=0) del buf0 del buf6 del primals_2 buf8 = buf7 del buf7 buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_cat_3[grid(64, 4)](buf1, buf3, buf5, buf8, buf9, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf1 del buf3 del buf5 buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float32) triton_poi_fused_clone_view_4[grid(256)](buf9, buf10, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf9 buf11 = buf8 del buf8 extern_kernels.mm(buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), out=buf11) del primals_1 buf12 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused_add_5[grid(64)](buf12, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 return buf12, reinterpret_tensor(buf10, (16, 16), (1, 16), 0) def cheb_conv(laplacian, 
inputs, weight): """Chebyshev convolution. Args: laplacian (:obj:`torch.sparse.Tensor`): The laplacian corresponding to the current sampling of the sphere. inputs (:obj:`torch.Tensor`): The current input data being forwarded. weight (:obj:`torch.Tensor`): The weights of the current layer. Returns: :obj:`torch.Tensor`: Inputs after applying Chebyshev convolution. """ B, V, Fin = inputs.shape K, Fin, Fout = weight.shape x0 = inputs.permute(1, 2, 0).contiguous() x0 = x0.view([V, Fin * B]) inputs = x0.unsqueeze(0) if K > 0: x1 = torch.sparse.mm(laplacian, x0) inputs = torch.cat((inputs, x1.unsqueeze(0)), 0) for _ in range(1, K - 1): x2 = 2 * torch.sparse.mm(laplacian, x1) - x0 inputs = torch.cat((inputs, x2.unsqueeze(0)), 0) x0, x1 = x1, x2 inputs = inputs.view([K, V, Fin, B]) inputs = inputs.permute(3, 1, 2, 0).contiguous() inputs = inputs.view([B * V, Fin * K]) weight = weight.view(Fin * K, Fout) inputs = inputs.matmul(weight) inputs = inputs.view([B, V, Fout]) return inputs class ChebConvNew(torch.nn.Module): """Graph convolutional layer. """ def __init__(self, in_channels, out_channels, kernel_size, bias=True, conv=cheb_conv): """Initialize the Chebyshev layer. Args: in_channels (int): Number of channels/features in the input graph. out_channels (int): Number of channels/features in the output graph. kernel_size (int): Number of trainable parameters per filter, which is also the size of the convolutional kernel. The order of the Chebyshev polynomials is kernel_size - 1. bias (bool): Whether to add a bias term. conv (callable): Function which will perform the actual convolution. """ super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self._conv = conv shape = kernel_size, in_channels, out_channels self.weight = torch.nn.Parameter(torch.Tensor(*shape)) if bias: self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.kaiming_initialization() def kaiming_initialization(self): """Initialize weights and bias. """ std = math.sqrt(2 / (self.in_channels * self.kernel_size)) self.weight.data.normal_(0, std) if self.bias is not None: self.bias.data.fill_(0.01) def forward(self, input_0, input_1): primals_1 = self.weight primals_4 = self.bias primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
phil-hawkins/deepsphere-pytorch
ChebConv
false
16,261
[ "MIT" ]
99
f23c531445b3ddf234c7e98cdadb010163051e6d
https://github.com/phil-hawkins/deepsphere-pytorch/tree/f23c531445b3ddf234c7e98cdadb010163051e6d
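The cheb_conv routine in this record unrolls the Chebyshev recurrence x_0 = inputs, x_1 = L x_0, x_k = 2 L x_{k-1} - x_{k-2}, then mixes the stacked basis with the (K, Fin, Fout) weight tensor. A minimal sketch of that recurrence, using a dense stand-in for the (normally sparse) laplacian and the hypothetical helper name cheb_basis:

import torch

def cheb_basis(laplacian: torch.Tensor, x: torch.Tensor, K: int) -> torch.Tensor:
    # x_0 = x, x_1 = L @ x, x_k = 2 * L @ x_{k-1} - x_{k-2}
    xs = [x]
    if K > 1:
        xs.append(laplacian @ x)
    for _ in range(2, K):
        xs.append(2 * (laplacian @ xs[-1]) - xs[-2])
    return torch.stack(xs)  # (K, V, Fin * B) in the layout used by cheb_conv

L = torch.rand(4, 4)   # dense stand-in for the graph laplacian
x = torch.rand(4, 4)   # V x (Fin * B), as produced by the permute/view in cheb_conv
print(cheb_basis(L, x, K=4).shape)  # torch.Size([4, 4, 4])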
RUM
import torch import torch.nn.functional as F import torch.nn as nn def rotation_components(x, y, eps=1e-12): size_batch = x.size()[0] hidden_size = x.size()[1] u = F.normalize(x, p=2, dim=1, eps=eps) costh = torch.sum(u * F.normalize(y, p=2, dim=1, eps=eps), dim=1).view( size_batch, 1) sinth = torch.sqrt(1 - costh ** 2).view(size_batch, 1) Rth = torch.cat((costh, -sinth, sinth, costh), dim=1).view(size_batch, 2, 2 ) v = F.normalize(y - torch.sum(u * y, 1).view(size_batch, 1) * u, p=2, dim=1, eps=eps) tmp = torch.cat((u.view(size_batch, 1, hidden_size), v.view(size_batch, 1, hidden_size)), dim=1) return u.view(size_batch, hidden_size, 1), v.view(size_batch, hidden_size, 1), tmp, Rth def rotate(v1, v2, v): size_batch = v1.size()[0] hidden_size = v1.size()[1] U = rotation_components(v1, v2) h = v.view(size_batch, hidden_size, 1) return v + (-torch.matmul(U[0], torch.matmul(U[0].transpose(1, 2), h)) - torch.matmul(U[1], torch.matmul(U[1].transpose(1, 2), h)) + torch. matmul(U[2].transpose(1, 2), torch.matmul(U[3], torch.matmul(U[2], h))) ).view(size_batch, hidden_size) def rotation_operator(x, y, eps=1e-12): hidden_size = x.size()[1] tmp_u, tmp_v, tmp, Rth = rotation_components(x, y, eps=eps) return torch.eye(hidden_size, device=x.device) - torch.matmul(tmp_u, torch.transpose(tmp_u, dim0=1, dim1=2)) - torch.matmul(tmp_v, torch .transpose(tmp_v, dim0=1, dim1=2)) + torch.matmul(torch.matmul( torch.transpose(tmp, dim0=1, dim1=2), Rth), tmp) class RUMCell(nn.Module): def __init__(self, input_size, hidden_size, eta_, lambda_, bias, eps, activation): super(RUMCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.eta_ = eta_ self.lambda_ = lambda_ self.bias = bias self.eps = eps if activation is None: activation = 'relu' if activation == 'tanh': self.activation = F.tanh elif activation == 'relu': self.activation = F.relu self.r_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size, hidden_size)) nn.init.orthogonal_(self.r_kernel) self.r_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.r_bias, 1.0) self.u_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size, hidden_size)) nn.init.orthogonal_(self.u_kernel) self.u_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.u_bias, 1.0) self.inp_emb_kernel = nn.Parameter(torch.Tensor(input_size, hidden_size)) nn.init.xavier_uniform_(self.inp_emb_kernel) self.inp_emb_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.inp_emb_bias, 0.0) def forward(self, inputs, hidden, assoc_mem): if hidden is None: hidden = torch.zeros(inputs.size(0), self.hidden_size, device= inputs.device) if assoc_mem is None and self.lambda_ == 1: assoc_mem = torch.eye(self.hidden_size, device=inputs.device ).unsqueeze(0) r = torch.matmul(torch.cat((inputs, hidden), 1), self.r_kernel ) + self.r_bias u = torch.matmul(torch.cat((inputs, hidden), 1), self.u_kernel ) + self.u_bias x_emb = torch.matmul(inputs, self.inp_emb_kernel) + self.inp_emb_bias u = u.sigmoid() if self.lambda_ == 0: hidden_new = rotate(x_emb, r, hidden) elif self.lambda_ == 1: tmp_rotation = rotation_operator(x_emb, r) assoc_mem = torch.matmul(assoc_mem, tmp_rotation) hidden_new = torch.matmul(assoc_mem, hidden.unsqueeze(-1)).squeeze( -1) else: raise c = F.relu(hidden_new + x_emb) new_h = u * hidden + (1 - u) * c if self.eta_: new_h = F.normalize(new_h, p=2, dim=1, eps=self.eps) * self.eta_ return new_h, assoc_mem class RUM(nn.Module): def __init__(self, input_size, hidden_size, eta_=None, lambda_=0, bias= True, 
eps=1e-12, activation=None): super().__init__() self.rum_cell = RUMCell(input_size, hidden_size, eta_, lambda_, bias, eps, activation) def forward(self, input_, hidden=None, assoc_mem=None): outputs = [] for x in torch.unbind(input_, dim=1): hidden, assoc_mem = self.rum_cell(x, hidden, assoc_mem) outputs.append(hidden) return torch.stack(outputs, dim=1), hidden def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp43 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp54 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp26 + tmp2 tmp28 = tmp27 * tmp27 tmp30 = tmp29 + tmp7 tmp31 = tmp30 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp33 + tmp13 tmp35 = tmp34 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp37 + tmp19 tmp39 = tmp38 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = 
libdevice.sqrt(tmp40) tmp42 = triton_helpers.maximum(tmp41, tmp24) tmp44 = tmp43 + tmp2 tmp45 = tmp44 * tmp44 tmp47 = tmp46 + tmp7 tmp48 = tmp47 * tmp47 tmp49 = tmp45 + tmp48 tmp51 = tmp50 + tmp13 tmp52 = tmp51 * tmp51 tmp53 = tmp49 + tmp52 tmp55 = tmp54 + tmp19 tmp56 = tmp55 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = triton_helpers.maximum(tmp58, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp42, xmask) tl.store(out_ptr2 + x0, tmp59, xmask) @triton.jit def triton_poi_fused_add_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4(in_out_ptr0 , in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp26 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = tmp3 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp8 / tmp25 tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp34 = tmp14 / tmp25 tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tmp38 = tmp20 / tmp25 tmp39 = tmp37 * tmp38 tmp40 = tmp36 + tmp39 tmp41 = tmp26 * tmp3 tmp42 = tmp29 * tmp8 tmp43 = tmp41 + tmp42 tmp44 = tmp33 * tmp14 tmp45 = tmp43 + tmp44 tmp46 = tmp37 * tmp20 tmp47 = tmp45 + tmp46 tl.store(in_out_ptr0 + x0, tmp40, xmask) tl.store(out_ptr0 + x0, tmp47, xmask) @triton.jit def triton_poi_fused_add_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 - tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) 
@triton.jit def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_cat_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 2 x0 = xindex % 4 x2 = xindex // 8 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_cat_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + x1, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp10 * tmp10 tmp12 = 1.0 tmp13 = tmp12 - tmp11 tmp14 = libdevice.sqrt(tmp13) tmp15 = -tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp9, tmp15, tmp16) tmp18 = tmp0 >= tmp7 tmp19 = tl.full([1], 3, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = tmp18 & tmp20 tmp22 = tl.load(in_ptr0 + x1, tmp21 & xmask, eviction_policy= 'evict_last', other=0.0) tmp23 = tmp22 * tmp22 tmp24 = tmp12 - tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp21, tmp25, tmp26) tmp28 = tmp0 >= tmp19 tl.full([1], 4, tl.int64) tmp31 = tl.load(in_ptr0 + x1, tmp28 & xmask, eviction_policy= 'evict_last', other=0.0) tmp32 = tl.where(tmp21, tmp27, tmp31) tmp33 = tl.where(tmp9, tmp17, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x2, tmp34, xmask) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr1 + x2, xmask) tmp8 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask) 
tmp15 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x2, xmask) tmp25 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = -tmp0 tmp3 = tmp1 - tmp2 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = tmp6 + tmp5 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp6 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp8 = tl.load(in_ptr3 + x2, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x2, xmask) tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr7 + x2, xmask) tmp25 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tmp24 = tmp23 + tmp9 tmp26 = tmp24 / tmp25 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) tl.store(out_ptr1 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_clamp_min_linalg_vector_norm_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-12 tmp25 = triton_helpers.maximum(tmp23, tmp24) tl.store(out_ptr0 + x0, tmp25, xmask) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (12 + 16 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x2, xmask) tmp8 = tl.load(in_ptr3 + x2, xmask) tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x2, xmask) tmp15 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp2 = -tmp1 tmp4 = tmp2 - tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = tmp17 * tmp0 tmp19 = 1.0 tmp20 = tmp19 - tmp17 tmp21 = tmp20 * tmp13 tmp22 = tmp18 + tmp21 tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp22, xmask) @triton.jit def triton_poi_fused_stack_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) 
triton_poi_fused_cat_1[grid(32)](primals_1, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_2, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_6, out=buf4) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_6, out=buf26) buf48 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_6, out=buf48) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf27 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf49 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_clamp_min_linalg_vector_norm_2[grid(4)](buf4, primals_7, buf26, buf48, buf5, buf27, buf49, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_3[grid(16)](buf4, primals_7, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 buf8 = reinterpret_tensor(buf7, (4,), (1,), 0) del buf7 buf9 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf8, buf2, primals_3, buf6, buf9, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf2, primals_3, buf9, buf6, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf12) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 1), 0 ), buf12, out=buf13) buf14 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0), buf14, out=buf15) buf16 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf6, buf11, buf16, 32, XBLOCK=32, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf16, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf8, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf18, (4, 2, 2), (4, 2, 1), 0), buf17, out=buf19) buf20 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf16, (4, 4, 2), (8, 1, 4), 0), buf19, out=buf20) buf21 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0) del buf13 buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_9[grid(16)](buf21, buf15, 
buf20, buf4, primals_7, buf3, primals_5, buf26, buf27, buf22, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_10[grid(32)](primals_1, buf22, buf23, 32, XBLOCK=32, num_warps=1, num_stages=1) buf24 = reinterpret_tensor(buf20, (4, 4), (4, 1), 0) del buf20 extern_kernels.mm(buf23, primals_2, out=buf24) buf25 = reinterpret_tensor(buf15, (4, 4), (4, 1), 0) del buf15 extern_kernels.mm(buf23, primals_4, out=buf25) buf29 = buf27 del buf27 buf30 = reinterpret_tensor(buf29, (4,), (1,), 0) del buf29 buf31 = buf8 del buf8 triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf30, buf24, primals_3, buf28, buf31, 4, XBLOCK=4, num_warps =1, num_stages=1) buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf24, primals_3, buf31, buf28, buf32, 16, XBLOCK=16, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf28, buf33, buf34, 32, XBLOCK=32, num_warps=1, num_stages=1) buf35 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf28, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf35) buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf28, (4, 4, 1), (4, 1, 1), 0), buf35, out=buf36) buf37 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf33, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf37) buf38 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf33, (4, 4, 1), (4, 1, 1), 0), buf37, out=buf38) buf39 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf34, reinterpret_tensor(buf22, (4, 4, 1), (4, 1, 1), 0), out=buf39) buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf30, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf40, (4, 2, 2), (4, 2, 1), 0), buf39, out=buf41) buf42 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf34, (4, 4, 2), (8, 1, 4), 0), buf41, out=buf42) buf43 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0) del buf36 buf44 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf50 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11[grid(16)](buf43, buf22, buf38, buf42, buf26, primals_7, buf25, primals_5, buf48, buf49, buf44, buf50, 16, XBLOCK=16, num_warps=1, num_stages=1) buf45 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_12[grid(32)](primals_1, buf44, buf45, 32, XBLOCK=32, num_warps=1, num_stages=1) buf46 = reinterpret_tensor(buf42, (4, 4), (4, 1), 0) del buf42 extern_kernels.mm(buf45, primals_2, out=buf46) buf47 = reinterpret_tensor(buf38, (4, 4), (4, 1), 0) del buf38 extern_kernels.mm(buf45, primals_4, out=buf47) buf51 = buf49 del buf49 buf52 = reinterpret_tensor(buf51, (4,), (1,), 0) del buf51 buf53 = buf30 del buf30 triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf52, buf46, primals_3, buf50, buf53, 4, XBLOCK=4, num_warps =1, 
num_stages=1) buf54 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf46, primals_3, buf53, buf50, buf54, 16, XBLOCK=16, num_warps=1, num_stages=1) buf55 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf54, buf55, 16, XBLOCK=16, num_warps=1, num_stages=1) buf56 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf50, buf55, buf56, 32, XBLOCK=32, num_warps=1, num_stages=1) buf57 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf50, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf57) buf58 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf50, (4, 4, 1), (4, 1, 1), 0), buf57, out=buf58) buf59 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf55, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf59) buf60 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf55, (4, 4, 1), (4, 1, 1), 0), buf59, out=buf60) buf61 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf56, reinterpret_tensor(buf44, (4, 4, 1), (4, 1, 1), 0), out=buf61) buf62 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf52, buf62, 16, XBLOCK=16, num_warps=1, num_stages=1) buf63 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf62, (4, 2, 2), (4, 2, 1), 0), buf61, out=buf63) buf64 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf56, (4, 4, 2), (8, 1, 4), 0), buf63, out=buf64) buf70 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12 ), primals_6, out=buf70) del primals_6 buf71 = reinterpret_tensor(buf52, (4, 1), (1, 4), 0) del buf52 triton_poi_fused_add_clamp_min_linalg_vector_norm_13[grid(4)](buf70, primals_7, buf71, 4, XBLOCK=4, num_warps=1, num_stages=1) buf65 = reinterpret_tensor(buf58, (4, 4), (4, 1), 0) del buf58 buf66 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf72 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_div_mul_relu_rsub_sigmoid_11[grid(16)](buf65, buf44, buf60, buf64, buf48, primals_7, buf47, primals_5, buf70, buf71, buf66, buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf67 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_14[grid(32)](primals_1, buf66, buf67, 32, XBLOCK=32, num_warps=1, num_stages=1) buf68 = reinterpret_tensor(buf64, (4, 4), (4, 1), 0) del buf64 extern_kernels.mm(buf67, primals_2, out=buf68) buf69 = reinterpret_tensor(buf60, (4, 4), (4, 1), 0) del buf60 extern_kernels.mm(buf67, primals_4, out=buf69) buf73 = buf71 del buf71 buf74 = reinterpret_tensor(buf73, (4,), (1,), 0) del buf73 buf75 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_clamp_min_div_linalg_vector_norm_mul_sum_4[grid(4) ](buf74, buf68, primals_3, buf72, buf75, 4, XBLOCK=4, num_warps =1, num_stages=1) buf76 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_sub_5[grid(16)](buf68, primals_3, buf75, buf72, buf76, 16, XBLOCK=16, num_warps=1, num_stages=1) buf77 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_6[grid(16)](buf76, buf77, 16, XBLOCK=16, num_warps=1, num_stages=1) buf78 = 
empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_cat_7[grid(32)](buf72, buf77, buf78, 32, XBLOCK=32, num_warps=1, num_stages=1) buf79 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf72, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf79) buf80 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf72, (4, 4, 1), (4, 1, 1), 0), buf79, out=buf80) buf81 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf77, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf81) buf82 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf77, (4, 4, 1), (4, 1, 1), 0), buf81, out=buf82) buf83 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(buf78, reinterpret_tensor(buf66, (4, 4, 1), (4, 1, 1), 0), out=buf83) buf84 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_8[grid(16)](buf74, buf84, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf74 buf85 = empty_strided_cuda((4, 2, 1), (2, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf84, (4, 2, 2), (4, 2, 1), 0), buf83, out=buf85) buf86 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf78, (4, 4, 2), (8, 1, 4), 0), buf85, out=buf86) buf87 = reinterpret_tensor(buf80, (4, 4), (4, 1), 0) del buf80 buf88 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_relu_rsub_sigmoid_15[grid(16)](buf87, buf66, buf82, buf86, buf70, primals_7, buf69, primals_5, buf88, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf82 del buf86 buf89 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_stack_16[grid(64)](buf22, buf44, buf66, buf88, buf89, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf89, (4, 4, 4), (16, 4, 1), 0), buf88, primals_3, primals_5, primals_7, buf0, buf2, buf3, buf4, buf6, reinterpret_tensor(buf9, (4, 1), (1, 1), 0), buf10, buf11, buf21, buf22, buf24, buf25, buf26, buf28, reinterpret_tensor(buf31, (4, 1), (1, 1), 0), buf32, buf33, buf34, buf43, buf44, buf46, buf47, buf48, buf50, reinterpret_tensor(buf53, (4, 1), (1, 1), 0), buf54, buf55, buf56, buf65, buf66, buf68, buf69, buf70, buf72, reinterpret_tensor (buf75, (4, 1), (1, 1), 0), buf76, buf77, buf78, buf87, reinterpret_tensor(buf85, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf84, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf83, (4, 1, 2), (2, 1, 1), 0), buf81, buf79, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(buf67, (8, 4), (1, 8), 0), reinterpret_tensor( primals_4, (4, 8), (1, 4), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), reinterpret_tensor(buf63, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf62, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf61, (4, 1, 2), (2, 1, 1), 0), buf59, buf57, reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf45, (8, 4), (1, 8), 0), reinterpret_tensor( buf41, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf40, (4, 2, 2 ), (4, 1, 2), 0), reinterpret_tensor(buf39, (4, 1, 2), (2, 1, 1), 0 ), buf37, buf35, reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf23, (8, 4), (1, 8), 0), buf16, reinterpret_tensor(buf19, (4, 1, 2), (2, 1, 1), 0), reinterpret_tensor(buf18, (4, 2, 2), (4, 1, 2), 0), reinterpret_tensor(buf17, (4, 1, 2), (2, 1, 1), 0), buf14, 
buf12, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), reinterpret_tensor(buf1, (8, 4), (1, 8), 0)) def rotation_components(x, y, eps=1e-12): size_batch = x.size()[0] hidden_size = x.size()[1] u = F.normalize(x, p=2, dim=1, eps=eps) costh = torch.sum(u * F.normalize(y, p=2, dim=1, eps=eps), dim=1).view( size_batch, 1) sinth = torch.sqrt(1 - costh ** 2).view(size_batch, 1) Rth = torch.cat((costh, -sinth, sinth, costh), dim=1).view(size_batch, 2, 2 ) v = F.normalize(y - torch.sum(u * y, 1).view(size_batch, 1) * u, p=2, dim=1, eps=eps) tmp = torch.cat((u.view(size_batch, 1, hidden_size), v.view(size_batch, 1, hidden_size)), dim=1) return u.view(size_batch, hidden_size, 1), v.view(size_batch, hidden_size, 1), tmp, Rth def rotate(v1, v2, v): size_batch = v1.size()[0] hidden_size = v1.size()[1] U = rotation_components(v1, v2) h = v.view(size_batch, hidden_size, 1) return v + (-torch.matmul(U[0], torch.matmul(U[0].transpose(1, 2), h)) - torch.matmul(U[1], torch.matmul(U[1].transpose(1, 2), h)) + torch. matmul(U[2].transpose(1, 2), torch.matmul(U[3], torch.matmul(U[2], h))) ).view(size_batch, hidden_size) def rotation_operator(x, y, eps=1e-12): hidden_size = x.size()[1] tmp_u, tmp_v, tmp, Rth = rotation_components(x, y, eps=eps) return torch.eye(hidden_size, device=x.device) - torch.matmul(tmp_u, torch.transpose(tmp_u, dim0=1, dim1=2)) - torch.matmul(tmp_v, torch .transpose(tmp_v, dim0=1, dim1=2)) + torch.matmul(torch.matmul( torch.transpose(tmp, dim0=1, dim1=2), Rth), tmp) class RUMCell(nn.Module): def __init__(self, input_size, hidden_size, eta_, lambda_, bias, eps, activation): super(RUMCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.eta_ = eta_ self.lambda_ = lambda_ self.bias = bias self.eps = eps if activation is None: activation = 'relu' if activation == 'tanh': self.activation = F.tanh elif activation == 'relu': self.activation = F.relu self.r_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size, hidden_size)) nn.init.orthogonal_(self.r_kernel) self.r_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.r_bias, 1.0) self.u_kernel = nn.Parameter(torch.Tensor(input_size + hidden_size, hidden_size)) nn.init.orthogonal_(self.u_kernel) self.u_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.u_bias, 1.0) self.inp_emb_kernel = nn.Parameter(torch.Tensor(input_size, hidden_size)) nn.init.xavier_uniform_(self.inp_emb_kernel) self.inp_emb_bias = nn.Parameter(torch.Tensor(hidden_size)) nn.init.constant_(self.inp_emb_bias, 0.0) def forward(self, inputs, hidden, assoc_mem): if hidden is None: hidden = torch.zeros(inputs.size(0), self.hidden_size, device= inputs.device) if assoc_mem is None and self.lambda_ == 1: assoc_mem = torch.eye(self.hidden_size, device=inputs.device ).unsqueeze(0) r = torch.matmul(torch.cat((inputs, hidden), 1), self.r_kernel ) + self.r_bias u = torch.matmul(torch.cat((inputs, hidden), 1), self.u_kernel ) + self.u_bias x_emb = torch.matmul(inputs, self.inp_emb_kernel) + self.inp_emb_bias u = u.sigmoid() if self.lambda_ == 0: hidden_new = rotate(x_emb, r, hidden) elif self.lambda_ == 1: tmp_rotation = rotation_operator(x_emb, r) assoc_mem = torch.matmul(assoc_mem, tmp_rotation) hidden_new = torch.matmul(assoc_mem, hidden.unsqueeze(-1)).squeeze( -1) else: raise c = F.relu(hidden_new + x_emb) new_h = u * hidden + (1 - u) * c if self.eta_: new_h = F.normalize(new_h, p=2, dim=1, eps=self.eps) * self.eta_ return new_h, assoc_mem class RUMNew(nn.Module): def __init__(self, input_size, hidden_size, 
eta_=None, lambda_=0, bias= True, eps=1e-12, activation=None): super().__init__() self.rum_cell = RUMCell(input_size, hidden_size, eta_, lambda_, bias, eps, activation) def forward(self, input_0): primals_2 = self.rum_cell.r_kernel primals_3 = self.rum_cell.r_bias primals_4 = self.rum_cell.u_kernel primals_5 = self.rum_cell.u_bias primals_6 = self.rum_cell.inp_emb_kernel primals_7 = self.rum_cell.inp_emb_bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
omri123/rotational-unit-of-memory
RUM
false
16,262
[ "MIT" ]
82
e796c841e1e837df09497ba77c3bc285db47d02d
https://github.com/omri123/rotational-unit-of-memory/tree/e796c841e1e837df09497ba77c3bc285db47d02d
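rotation_operator in this record builds, per batch element, the orthogonal matrix that rotates the unit vector along x onto the unit vector along y inside span{x, y}. A standalone numerical sketch of the same construction for a single pair of vectors (variable names u, v, c, s, P mirror the u, v, costh, sinth, tmp of rotation_components; the check itself is an assumption of this sketch, not code from the record):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x, y = torch.rand(8), torch.rand(8)
u = F.normalize(x, dim=0)                       # unit vector along x
v = F.normalize(y - (u @ y) * u, dim=0)         # unit vector orthogonal to u in span{x, y}
c = u @ F.normalize(y, dim=0)                   # cos(theta)
s = torch.sqrt(1 - c ** 2)                      # sin(theta)
P = torch.stack((u, v))                         # rows u, v -> the 'tmp' matrix in the record
Rth = torch.stack((torch.stack((c, -s)), torch.stack((s, c))))
R = torch.eye(8) - torch.outer(u, u) - torch.outer(v, v) + P.T @ Rth @ P
print(torch.allclose(R @ u, F.normalize(y, dim=0), atol=1e-5))  # True: x's direction is rotated onto y's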
ContrastiveDistanceLoss
import torch from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * class ContrastiveDistanceLoss(nn.Module): """ Contrastive distance loss """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveDistanceLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, distance_pred, distance_true): """ Forward propagation method for the contrastive loss. Args: distance_pred: predicted distances distance_true: true distances Returns: loss """ bs = len(distance_true) margin_distance = self.margin - distance_pred margin_distance_ = torch.clamp(margin_distance, min=0.0) loss = (1 - distance_true) * torch.pow(distance_pred, 2 ) + distance_true * torch.pow(margin_distance_, 2) if self.reduction == 'mean': loss = torch.sum(loss) / 2.0 / bs elif self.reduction == 'sum': loss = torch.sum(loss) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = tmp1 - tmp3 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tmp0 * tmp9 tmp11 = tmp5 + tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_div_mul_pow_rsub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class ContrastiveDistanceLossNew(nn.Module): """ Contrastive distance loss """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastiveDistanceLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pokidyshev/catalyst
ContrastiveDistanceLoss
false
16,263
[ "Apache-2.0" ]
46
bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
https://github.com/pokidyshev/catalyst/tree/bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
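With the default margin of 1.0 used above, the per-pair loss is (1 - y) * d^2 for similar pairs plus y * clamp(margin - d, min=0)^2 for dissimilar pairs, and the 'mean' reduction divides the summed loss by 2 * batch_size. A worked two-pair example (the concrete distances are chosen only for illustration):

import torch

margin = 1.0
d = torch.tensor([0.3, 0.9])   # predicted distances
y = torch.tensor([0.0, 1.0])   # 0 = similar pair, 1 = dissimilar pair
per_pair = (1 - y) * d ** 2 + y * torch.clamp(margin - d, min=0.0) ** 2
loss = per_pair.sum() / 2.0 / len(y)
print(per_pair)  # tensor([0.0900, 0.0100])
print(loss)      # tensor(0.0250)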
ContrastivePairwiseEmbeddingLoss
import torch from torch.nn.modules.loss import * import torch.nn as nn import torch.nn.functional as F from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * class ContrastivePairwiseEmbeddingLoss(nn.Module): """ ContrastivePairwiseEmbeddingLoss – proof of concept criterion. Still work in progress. """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastivePairwiseEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, embeddings_pred, embeddings_true): """ Work in progress. Args: embeddings_pred: predicted embeddings embeddings_true: true embeddings Returns: loss """ device = embeddings_pred.device pairwise_similarity = torch.einsum('se,ae->sa', embeddings_pred, embeddings_true) bs = embeddings_pred.shape[0] batch_idx = torch.arange(bs, device=device) loss = F.cross_entropy(pairwise_similarity, batch_idx, reduction= self.reduction) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import * import torch.nn as nn from torch.nn import * from torch.optim import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused_arange_nll_loss_forward_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp6 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp0 = r0 tmp1 = tl.full([1, 1], -100, tl.int64) tmp2 = tmp0 != tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp0, tmp3) tmp5 = tl.load(in_ptr0 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = tl_math.exp(tmp6) tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tl_math.log(tmp16) tmp18 = tmp5 - tmp17 tmp19 = -tmp18 tmp20 = 0.0 tmp21 = tl.where(tmp2, tmp19, tmp20) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp25 = tmp2.to(tl.int64) tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tmp28.to(tl.float32) tmp30 = tmp24 / tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (1, 4, 4), (0, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf2 = 
empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_per_fused_arange_nll_loss_forward_1[grid(1)](buf4, buf1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 return buf4, class ContrastivePairwiseEmbeddingLossNew(nn.Module): """ ContrastivePairwiseEmbeddingLoss – proof of concept criterion. Still work in progress. """ def __init__(self, margin=1.0, reduction='mean'): """ Constructor method for the ContrastivePairwiseEmbeddingLoss class. Args: margin: margin parameter. reduction: criterion reduction type. """ super().__init__() self.margin = margin self.reduction = reduction or 'none' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
pokidyshev/catalyst
ContrastivePairwiseEmbeddingLoss
false
16,264
[ "Apache-2.0" ]
46
bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
https://github.com/pokidyshev/catalyst/tree/bfe2cc2af7b02bd954fb0b4a0cae8b350f56789a
GCNModelVAE
from torch.nn import Module import torch import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter import torch.nn as nn import torch.nn.modules.loss class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, dropout=0.0, act=F.relu): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight) def forward(self, input, adj): input = F.dropout(input, self.dropout, self.training) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) output = self.act(output) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class InnerProductDecoder(nn.Module): """Decoder for using inner product for prediction.""" def __init__(self, dropout, act=torch.sigmoid): super(InnerProductDecoder, self).__init__() self.dropout = dropout self.act = act def forward(self, z): z = F.dropout(z, self.dropout, training=self.training) adj = self.act(torch.mm(z, z.t())) return adj class GCNModelVAE(nn.Module): def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout): super(GCNModelVAE, self).__init__() self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu) self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.dc = InnerProductDecoder(dropout, act=lambda x: x) def encode(self, x, adj): hidden1 = self.gc1(x, adj) return self.gc2(hidden1, adj), self.gc3(hidden1, adj) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def forward(self, x, adj): mu, logvar = self.encode(x, adj) z = self.reparameterize(mu, logvar) return z, mu, logvar def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_feat_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4, 'dropout': 0.5}]
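# Illustrative sketch (not part of the dataset row above; tensor shapes are
# assumptions). During training, GCNModelVAE samples the latent z with the
# reparameterisation trick from reparameterize(): the randomness lives in eps,
# while mu and std stay differentiable. Note the module above uses
# std = torch.exp(logvar) directly.
import torch

torch.manual_seed(0)
mu = torch.zeros(4, 2)
logvar = torch.full((4, 2), -1.0)
std = torch.exp(logvar)            # ~0.368 for every entry
eps = torch.randn_like(std)        # the only source of randomness
z = eps.mul(std).add_(mu)          # z = mu + eps * std
print(z.shape)                     # torch.Size([4, 2])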
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.nn.functional as F from torch.nn.modules.module import Module from torch.nn.parameter import Parameter import torch.nn as nn import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 extern_kernels.mm(buf2, primals_5, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf5, out=buf6) del buf5 return buf4, buf6, buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, dropout=0.0, act=F.relu): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.dropout = dropout self.act = act self.weight = Parameter(torch.FloatTensor(in_features, out_features)) self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self.weight) def forward(self, input, adj): input = F.dropout(input, self.dropout, self.training) support = torch.mm(input, self.weight) output = torch.spmm(adj, support) output = self.act(output) return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class InnerProductDecoder(nn.Module): """Decoder for using inner product for prediction.""" def __init__(self, dropout, act=torch.sigmoid): super(InnerProductDecoder, self).__init__() self.dropout = dropout self.act = act def forward(self, z): z = F.dropout(z, self.dropout, training=self.training) adj = self.act(torch.mm(z, z.t())) return adj class GCNModelVAENew(nn.Module): def __init__(self, 
input_feat_dim, hidden_dim1, hidden_dim2, dropout): super(GCNModelVAENew, self).__init__() self.gc1 = GraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu) self.gc2 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.gc3 = GraphConvolution(hidden_dim1, hidden_dim2, dropout, act= lambda x: x) self.dc = InnerProductDecoder(dropout, act=lambda x: x) def encode(self, x, adj): hidden1 = self.gc1(x, adj) return self.gc2(hidden1, adj), self.gc3(hidden1, adj) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_2 = self.gc2.weight primals_3 = self.gc3.weight primals_4 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1], output[2]
peterfeifanchen/scGNN
GCNModelVAE
false
16,265
[ "MIT" ]
60
4ef9013ad0f44f9f51708e9bb60e5138f5706593
https://github.com/peterfeifanchen/scGNN/tree/4ef9013ad0f44f9f51708e9bb60e5138f5706593
Shared
from _paritybench_helpers import _mock_config import torch import torch.utils.data import torch.optim import torch.utils.data.distributed class Shared(torch.nn.Module): def __init__(self, args): super(Shared, self).__init__() ncha, self.size, _ = args.inputsize self.taskcla = args.taskcla self.latent_dim = args.latent_dim self.nhid = args.units self.nlayers = args.nlayers self.relu = torch.nn.ReLU() self.drop = torch.nn.Dropout(0.2) self.fc1 = torch.nn.Linear(ncha * self.size * self.size, self.nhid) if self.nlayers == 3: self.fc2 = torch.nn.Linear(self.nhid, self.nhid) self.fc3 = torch.nn.Linear(self.nhid, self.latent_dim) else: self.fc2 = torch.nn.Linear(self.nhid, self.latent_dim) def forward(self, x_s): h = x_s.view(x_s.size(0), -1) h = self.drop(self.relu(self.fc1(h))) h = self.drop(self.relu(self.fc2(h))) if self.nlayers == 3: h = self.drop(self.relu(self.fc3(h))) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'args': _mock_config(inputsize=[4, 4, 4], taskcla=4, latent_dim=4, units=4, nlayers=1)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3, primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf4, primals_4 class SharedNew(torch.nn.Module): def __init__(self, args): super(SharedNew, self).__init__() ncha, self.size, _ = args.inputsize self.taskcla = args.taskcla self.latent_dim = args.latent_dim self.nhid = args.units self.nlayers = args.nlayers self.relu = torch.nn.ReLU() self.drop = torch.nn.Dropout(0.2) self.fc1 = torch.nn.Linear(ncha * self.size * self.size, self.nhid) if self.nlayers == 3: self.fc2 = torch.nn.Linear(self.nhid, self.nhid) self.fc3 = torch.nn.Linear(self.nhid, self.latent_dim) else: self.fc2 = torch.nn.Linear(self.nhid, self.latent_dim) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5]) return output[0]
Prathyusha-Akundi/Adversarial-Continual-Learning
Shared
false
16,266
[ "MIT" ]
237
edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
https://github.com/Prathyusha-Akundi/Adversarial-Continual-Learning/tree/edf4bbd2c4c61f1cc20818793702ef8c6cf4e0df
LearnedPositionalEncoding
import torch import torch.nn as nn import torch.optim class LearnedPositionalEncoding(nn.Module): def __init__(self, max_position_embeddings, embedding_dim, seq_length): super(LearnedPositionalEncoding, self).__init__() self.position_embeddings = nn.Parameter(torch.zeros(1, seq_length, 512) ) def forward(self, x, position_ids=None): position_embeddings = self.position_embeddings return x + position_embeddings def get_inputs(): return [torch.rand([4, 4, 4, 512])] def get_init_inputs(): return [[], {'max_position_embeddings': 4, 'embedding_dim': 4, 'seq_length': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 512), (2048, 512, 1)) assert_size_stride(primals_2, (4, 4, 4, 512), (8192, 2048, 512, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(32768)](primals_2, primals_1, buf0, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LearnedPositionalEncodingNew(nn.Module): def __init__(self, max_position_embeddings, embedding_dim, seq_length): super(LearnedPositionalEncodingNew, self).__init__() self.position_embeddings = nn.Parameter(torch.zeros(1, seq_length, 512) ) def forward(self, input_0): primals_1 = self.position_embeddings primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
potpov/TransBTS
LearnedPositionalEncoding
false
16,267
[ "Apache-2.0" ]
163
658de5f1dde17d25db54fb07adf49370cc32d7c3
https://github.com/potpov/TransBTS/tree/658de5f1dde17d25db54fb07adf49370cc32d7c3
Adder2D
from torch.autograd import Function import math import torch import torch.nn as nn from torch.autograd.function import Function def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class Adder2D(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False, quantize=False, weight_bits=8, sparsity=0): super(Adder2D, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, x): output = adder2d_function(x, self.adder, self.stride, self.padding) if self.bias: output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4}]
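# Illustrative sketch (not part of the dataset row above; helper names and shapes
# are assumptions). adder.apply(W_col, X_col) above replaces the dot product of a
# convolution with a negative L1 distance:
#   out[f, p] = -sum_k |W_col[f, k] - X_col[k, p]|.
# The naive loop below checks that reading of the broadcasted forward pass.
import torch


def adder_forward_reference(W_col, X_col):
    n_filters, _ = W_col.shape
    _, n_pos = X_col.shape
    out = torch.empty(n_filters, n_pos)
    for f in range(n_filters):
        for p in range(n_pos):
            out[f, p] = -(W_col[f] - X_col[:, p]).abs().sum()
    return out


if __name__ == '__main__':
    torch.manual_seed(0)
    W_col, X_col = torch.randn(3, 5), torch.randn(5, 7)
    fused = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1)
    assert torch.allclose(fused, adder_forward_reference(W_col, X_col), atol=1e-5)
    print(fused.shape)  # torch.Size([3, 7])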
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.autograd import Function import math import torch.nn as nn from torch.autograd.function import Function assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_per_fused_abs_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = -tmp0 tl.store(out_ptr0 + (x1 + 4 * y0), tmp1, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_abs_sub_sum_1[grid(16)](primals_1, buf0, buf1, 16, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(4, 4)](buf1, buf2, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del buf1 return buf2, primals_1, reinterpret_tensor(buf0, (1, 64, 4), (256, 4, 1), 0 ) def adder2d_function(X, W, stride=1, padding=0): n_filters, _d_filter, h_filter, w_filter = W.size() n_x, _d_x, h_x, w_x = X.size() h_out = (h_x - h_filter + 2 * padding) / stride + 1 w_out = (w_x - w_filter + 2 * padding) / stride + 1 h_out, w_out = int(h_out), int(w_out) X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x), h_filter, dilation=1, padding=padding, stride=stride).view(n_x, -1, h_out * w_out ) X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1) W_col = W.view(n_filters, -1) out = adder.apply(W_col, X_col) out = out.view(n_filters, h_out, w_out, n_x) out = out.permute(3, 0, 1, 2).contiguous() return out class adder(Function): @staticmethod def forward(ctx, W_col, X_col): ctx.save_for_backward(W_col, X_col) output = -(W_col.unsqueeze(2) - X_col.unsqueeze(0)).abs().sum(1) return output @staticmethod def backward(ctx, grad_output): W_col, X_col = ctx.saved_tensors grad_W_col = ((X_col.unsqueeze(0) - W_col.unsqueeze(2)) * grad_output.unsqueeze(1)).sum(2) grad_W_col = grad_W_col / grad_W_col.norm(p=2).clamp(min=1e-12 ) * math.sqrt(W_col.size(1) * W_col.size(0)) / 5 grad_X_col = (-(X_col.unsqueeze(0) - W_col.unsqueeze(2)).clamp(-1, 1) * grad_output.unsqueeze(1)).sum(0) return grad_W_col, grad_X_col class Adder2DNew(nn.Module): def __init__(self, input_channel, output_channel, kernel_size, stride=1, padding=0, bias=False, quantize=False, weight_bits=8, sparsity=0): super(Adder2DNew, self).__init__() self.stride = stride self.padding = padding self.input_channel = input_channel self.output_channel = output_channel self.kernel_size = kernel_size self.adder = torch.nn.Parameter(nn.init.normal_(torch.randn( output_channel, input_channel, kernel_size, kernel_size))) self.bias = bias if bias: self.b = torch.nn.Parameter(nn.init.uniform_(torch.zeros( output_channel))) def forward(self, input_0): primals_1 = self.adder primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
poppin-mice/ShiftAddNet
Adder2D
false
16,268
[ "MIT" ]
55
a17369a50da5bba6250fdeac7c065bd00f293f3c
https://github.com/poppin-mice/ShiftAddNet/tree/a17369a50da5bba6250fdeac7c065bd00f293f3c
DeiTOutput
from _paritybench_helpers import _mock_config import torch from torch import nn import torch.utils.checkpoint class DeiTOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class DeiTOutputNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
jxhe/unify-parameter-efficient-tuning
DeiTOutput
false
16,269
[ "Apache-2.0" ]
101
3222ce2c0079566a28043e22380eb4ab6ad14389
https://github.com/jxhe/unify-parameter-efficient-tuning/tree/3222ce2c0079566a28043e22380eb4ab6ad14389
four_layer_conv
import torch class four_layer_conv(torch.nn.Module): def __init__(self): super(four_layer_conv, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.fcn1 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn2 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn3 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn4 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) def forward(self, x): x = self.relu(self.fcn1(x)) x = self.relu(self.fcn2(x)) x = self.relu(self.fcn3(x)) x = self.relu(self.fcn4(x)) return x def get_inputs(): return [torch.rand([4, 256, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1048576 * y1), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp4, None) tl.store(out_ptr1 + (y0 + 256 * x2 + 1048576 * y1), tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(65536, 9)](primals_1, buf0, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) triton_poi_fused_1[grid(1024, 4096)](primals_3, buf1, 1024, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_0[grid(65536, 9)](primals_4, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_0[grid(65536, 9)](primals_6, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_0[grid(65536, 9)](primals_8, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_2[grid(4194304)](buf6, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_2[grid(4194304)](buf8, primals_5, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_2[grid(4194304)](buf10, primals_7, 
4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf12 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) buf13 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(1024, 4096) ](buf11, primals_9, buf12, buf13, 1024, 4096, XBLOCK=32, YBLOCK =32, num_warps=4, num_stages=1) del buf11 del primals_9 return buf12, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13 class four_layer_convNew(torch.nn.Module): def __init__(self): super(four_layer_convNew, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.fcn1 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn2 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn3 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) self.fcn4 = torch.nn.Conv2d(256, 256, 3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.fcn1.weight primals_2 = self.fcn1.bias primals_4 = self.fcn2.weight primals_5 = self.fcn2.bias primals_6 = self.fcn3.weight primals_7 = self.fcn3.bias primals_8 = self.fcn4.weight primals_9 = self.fcn4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
peckjon/detectorch
four_layer_conv
false
16,270
[ "Apache-2.0" ]
627
69d31250d79a72b12b7419638ef59163f833bbba
https://github.com/peckjon/detectorch/tree/69d31250d79a72b12b7419638ef59163f833bbba
ConcatSquashLinear
from torch.nn import Module import torch from torch.nn import Linear import torch.utils.tensorboard class ConcatSquashLinear(Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinear, self).__init__() self._layer = Linear(dim_in, dim_out) self._hyper_bias = Linear(dim_ctx, dim_out, bias=False) self._hyper_gate = Linear(dim_ctx, dim_out) def forward(self, ctx, x): gate = torch.sigmoid(self._hyper_gate(ctx)) bias = self._hyper_bias(ctx) ret = self._layer(x) * gate + bias return ret def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4, 'dim_ctx': 4}]
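# Illustrative sketch (not part of the dataset row above; dimensions are
# assumptions). ConcatSquashLinear conditions an ordinary linear layer on a
# context vector: out = layer(x) * sigmoid(hyper_gate(ctx)) + hyper_bias(ctx),
# i.e. a per-feature gate and bias both produced from ctx.
import torch
import torch.nn as nn

torch.manual_seed(0)
layer = nn.Linear(4, 4)
hyper_gate = nn.Linear(4, 4)
hyper_bias = nn.Linear(4, 4, bias=False)

ctx, x = torch.randn(2, 4), torch.randn(2, 4)
out = layer(x) * torch.sigmoid(hyper_gate(ctx)) + hyper_bias(ctx)
print(out.shape)  # torch.Size([2, 4])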
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch.nn import Linear import torch.utils.tensorboard assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_0[grid(256)](buf3, buf2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), buf2 class ConcatSquashLinearNew(Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinearNew, self).__init__() self._layer = Linear(dim_in, dim_out) self._hyper_bias = Linear(dim_ctx, dim_out, bias=False) self._hyper_gate = Linear(dim_ctx, dim_out) def forward(self, input_0, input_1): primals_1 = self._layer.weight primals_2 = self._layer.bias primals_4 = self._hyper_bias.weight primals_5 = self._hyper_gate.weight primals_6 = self._hyper_gate.bias primals_3 = input_0 primals_7 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
entc-17-fyp-05/diffusion-point-cloud
ConcatSquashLinear
false
16,271
[ "MIT" ]
138
cde2e501855dea31496ddffad16f40aa588e3af8
https://github.com/entc-17-fyp-05/diffusion-point-cloud/tree/cde2e501855dea31496ddffad16f40aa588e3af8
S2S2Mean
import torch from torch import nn as nn def s2s2_gram_schmidt(v1, v2): """Normalise 2 3-vectors. Project second to orthogonal component. Take cross product for third. Stack to form SO matrix.""" u1 = v1 e1 = u1 / u1.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) u2 = v2 - (e1 * v2).sum(-1, keepdim=True) * e1 e2 = u2 / u2.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) e3 = torch.cross(e1, e2) return torch.stack([e1, e2, e3], 1) class S2S2Mean(nn.Module): """Module to map R^6 -> SO(3) with S2S2 method.""" def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 6) self.map.weight.data.uniform_(-10, 10) self.map.bias.data.uniform_(-10, 10) def forward(self, x): v = self.map(x).double().view(-1, 2, 3) v1, v2 = v[:, 0], v[:, 1] return s2s2_gram_schmidt(v1, v2).float() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dims': 4}]
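# Illustrative sketch (not part of the dataset row above). The Gram-Schmidt
# construction above should return, for every batch element, a 3x3 matrix with
# orthonormal rows and determinant +1 (a rotation matrix), up to numerical error.
# The function is restated verbatim so the check below is self-contained.
import torch


def s2s2_gram_schmidt(v1, v2):
    u1 = v1
    e1 = u1 / u1.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05)
    u2 = v2 - (e1 * v2).sum(-1, keepdim=True) * e1
    e2 = u2 / u2.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05)
    e3 = torch.cross(e1, e2)
    return torch.stack([e1, e2, e3], 1)


torch.manual_seed(0)
v1, v2 = torch.randn(8, 3).double(), torch.randn(8, 3).double()
R = s2s2_gram_schmidt(v1, v2)
eye = torch.eye(3, dtype=R.dtype).expand(8, 3, 3)
print(torch.allclose(R @ R.transpose(1, 2), eye, atol=1e-5))                    # rows orthonormal
print(torch.allclose(torch.det(R), torch.ones(8, dtype=R.dtype), atol=1e-5))    # determinant +1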
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 6 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 6 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 6 * x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (4 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr0 + (5 + 6 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float64) tmp2 = tmp1 * tmp1 tmp4 = tmp3.to(tl.float64) tmp5 = tmp4 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tmp6 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = tl.full([1], 1e-05, tl.float64) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp1 / tmp13 tmp16 = tmp15.to(tl.float64) tmp17 = tmp14 * tmp16 tmp18 = tmp4 / tmp13 tmp20 = tmp19.to(tl.float64) tmp21 = tmp18 * tmp20 tmp22 = tmp17 + tmp21 tmp23 = tmp8 / tmp13 tmp25 = tmp24.to(tl.float64) tmp26 = tmp23 * tmp25 tmp27 = tmp22 + tmp26 tl.store(out_ptr0 + x0, tmp27, xmask) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + x0 + 6 * x1), xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (x0 + 6 * x1), xmask) tmp5 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 6 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 6 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tmp0.to(tl.float64) tmp4 = tmp3.to(tl.float64) tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp9 = tmp8.to(tl.float64) tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12.to(tl.float64) tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tl.full([1], 1e-05, tl.float64) tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp4 / tmp18 tmp20 = tmp2 * tmp19 tmp21 = tmp1 - tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = xindex // 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (6 * x1 + (1 + x0) % 3), xmask) tmp2 = tl.load(in_ptr0 + 6 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 6 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 6 * 
x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + 3 * x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (1 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr0 + (6 * x1 + (2 + x0) % 3), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (3 * x1 + (1 + x0) % 3), xmask) tmp1 = tmp0.to(tl.float64) tmp3 = tmp2.to(tl.float64) tmp4 = tmp3 * tmp3 tmp6 = tmp5.to(tl.float64) tmp7 = tmp6 * tmp6 tmp8 = tmp4 + tmp7 tmp10 = tmp9.to(tl.float64) tmp11 = tmp10 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = tl.full([1], 1e-05, tl.float64) tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp1 / tmp15 tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = triton_helpers.maximum(tmp26, tmp14) tmp28 = tmp17 / tmp27 tmp29 = tmp16 * tmp28 tmp31 = tmp30.to(tl.float64) tmp32 = tmp31 / tmp15 tmp34 = tmp33 / tmp27 tmp35 = tmp32 * tmp34 tl.store(out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr1 + x2, tmp35, xmask) @triton.jit def triton_poi_fused__to_copy_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 9 x1 = xindex // 9 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (6 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp5.to(tl.float64) tmp7 = tl.load(in_ptr0 + 6 * x1, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp7.to(tl.float64) tmp9 = tmp8 * tmp8 tmp10 = tl.load(in_ptr0 + (1 + 6 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp10.to(tl.float64) tmp12 = tmp11 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = tl.load(in_ptr0 + (2 + 6 * x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp14.to(tl.float64) tmp16 = tmp15 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tl.full([1], 1e-05, tl.float64) tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp6 / tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tmp25 = tl.full([1], 6, tl.int64) tmp26 = tmp0 < tmp25 tmp27 = tmp24 & tmp26 tmp28 = tl.load(in_ptr1 + (3 * x1 + (-3 + x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr1 + 3 * x1, tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp29 * tmp29 tmp31 = tl.load(in_ptr1 + (1 + 3 * x1), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tl.load(in_ptr1 + (2 + 3 * x1), tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = libdevice.sqrt(tmp36) tmp38 = triton_helpers.maximum(tmp37, tmp19) tmp39 = tmp28 / tmp38 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp27, tmp39, tmp40) tmp42 = tmp0 >= tmp25 tl.full([1], 9, tl.int64) tmp45 = tl.load(in_ptr2 + (3 * x1 + (-6 + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tl.load(in_ptr3 + (3 * x1 + (-6 + x0)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp45 - tmp46 tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = 
tl.where(tmp42, tmp47, tmp48) tmp50 = tl.where(tmp27, tmp41, tmp49) tmp51 = tl.where(tmp4, tmp23, tmp50) tmp52 = tmp51.to(tl.float32) tl.store(out_ptr1 + x2, tmp52, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 4), (4, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 6), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 1), (1, 64), torch.float64) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sum_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 3), (3, 1), torch.float64) triton_poi_fused_clamp_div_linalg_vector_norm_mul_sub_sum_1[grid(192)]( buf0, buf1, buf2, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 3), (3, 1), torch.float64) buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float64) triton_poi_fused_clamp_div_linalg_cross_linalg_vector_norm_2[grid(192) ](buf0, buf2, buf3, buf4, 192, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 3, 3), (9, 3, 1), torch.float32) triton_poi_fused__to_copy_stack_3[grid(576)](buf0, buf2, buf3, buf4, buf6, 576, XBLOCK=128, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def s2s2_gram_schmidt(v1, v2): """Normalise 2 3-vectors. Project second to orthogonal component. Take cross product for third. Stack to form SO matrix.""" u1 = v1 e1 = u1 / u1.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) u2 = v2 - (e1 * v2).sum(-1, keepdim=True) * e1 e2 = u2 / u2.norm(p=2, dim=-1, keepdim=True).clamp(min=1e-05) e3 = torch.cross(e1, e2) return torch.stack([e1, e2, e3], 1) class S2S2MeanNew(nn.Module): """Module to map R^6 -> SO(3) with S2S2 method.""" def __init__(self, input_dims): super().__init__() self.map = nn.Linear(input_dims, 6) self.map.weight.data.uniform_(-10, 10) self.map.bias.data.uniform_(-10, 10) def forward(self, input_0): primals_1 = self.map.weight primals_2 = self.map.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pimdh/lie-vae
S2S2Mean
false
16,272
[ "MIT" ]
83
0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
https://github.com/pimdh/lie-vae/tree/0e0cc4d533c064fcfc405e8a75449f8b2f6cf8cf
CrossLayer
import torch import torch.nn as nn import torch.optim class CrossLayer(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, x0, x): return self.dropout(x0 * self.linear(x)) + x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d': 4, 'dropout': 0.5}]
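# Illustrative sketch (not part of the dataset row above; shapes are assumptions,
# dropout omitted for determinism). CrossLayer is a cross-network style block: the
# original features x0 gate a linear map of the current features x, plus a residual,
#   out = dropout(x0 * (W x + b)) + x,
# so stacking several such updates builds higher-order interactions with x0.
import torch
import torch.nn as nn

torch.manual_seed(0)
d = 4
linear = nn.Linear(d, d)
x0 = torch.randn(2, d)      # the untouched input features
x = x0.clone()              # running representation
for _ in range(3):          # three stacked cross-style updates sharing one weight (a simplification)
    x = x0 * linear(x) + x
print(x.shape)  # torch.Size([2, 4])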
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2, xmask) tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](buf1, primals_4, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class CrossLayerNew(nn.Module): def __init__(self, d, dropout): super().__init__() self.linear = nn.Linear(d, d) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
ppmdatix/rtdl
CrossLayer
false
16,273
[ "Apache-2.0" ]
298
a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
https://github.com/ppmdatix/rtdl/tree/a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
InitConv
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class InitConv(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConv, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, x): y = self.conv(x) y = F.dropout3d(y, self.dropout) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
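# Illustrative note (not part of the dataset row above; shapes are assumptions).
# F.dropout3d defaults to training=True, so InitConv above zeroes whole channels
# even in eval() mode unless training=self.training is passed. Minimal demo of the
# channel-wise behaviour:
import torch
import torch.nn.functional as F

torch.manual_seed(0)
y = torch.ones(1, 16, 4, 4, 4)            # (N, C, D, H, W)
dropped = F.dropout3d(y, p=0.2)           # zeroes entire channels, scales the rest by 1/0.8
zeroed = (dropped.view(16, -1).sum(dim=1) == 0).sum().item()
print(zeroed, 'of 16 channels zeroed')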
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_bernoulli_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = float('nan') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = 1.25 tmp5 = tmp3 * tmp4 tmp6 = tmp2 * tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 1, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_bernoulli_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) torch.ops.aten.bernoulli_.float(buf2, 0.8) buf4 = buf0 del buf0 triton_poi_fused_div_mul_1[grid(1024)](buf4, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf4, (16, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class InitConvNew(nn.Module): def __init__(self, in_channels=4, out_channels=16, dropout=0.2): super(InitConvNew, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1) self.dropout = dropout def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
potpov/TransBTS
InitConv
false
16,274
[ "Apache-2.0" ]
163
658de5f1dde17d25db54fb07adf49370cc32d7c3
https://github.com/potpov/TransBTS/tree/658de5f1dde17d25db54fb07adf49370cc32d7c3
SimpleNet
import torch import torch.nn as nn import torch.nn.functional as F class SimpleNet(nn.Module): def __init__(self, ni): super().__init__() self.linear1 = nn.Linear(ni, 128) self.linear2 = nn.Linear(128, 128) self.linear3 = nn.Linear(128, 64) self.linear4 = nn.Linear(64, 64) self.linear5 = nn.Linear(64, 1) def forward(self, x): x = F.tanh(self.linear1(x)) x = F.tanh(self.linear2(x)) x = F.tanh(self.linear3(x)) x = F.tanh(self.linear4(x)) x = self.linear5(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'ni': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128), (128, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64), (64, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1, 64), (64, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(8192)](buf3, primals_5, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 64), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf4 triton_poi_fused_tanh_1[grid(4096)](buf5, primals_7, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 64), (1, 64), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 64), (1024, 256, 
64, 1), 0) del buf6 triton_poi_fused_tanh_1[grid(4096)](buf7, primals_9, 4096, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 64), (64, 1), 0), reinterpret_tensor(primals_10, (64, 1), (1, 64), 0 ), alpha=1, beta=1, out=buf9) del primals_11 return reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4 class SimpleNetNew(nn.Module): def __init__(self, ni): super().__init__() self.linear1 = nn.Linear(ni, 128) self.linear2 = nn.Linear(128, 128) self.linear3 = nn.Linear(128, 64) self.linear4 = nn.Linear(64, 64) self.linear5 = nn.Linear(64, 1) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_8 = self.linear4.weight primals_9 = self.linear4.bias primals_10 = self.linear5.weight primals_11 = self.linear5.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
pranjukn/AI-Feynman
SimpleNet
false
16,275
[ "MIT" ]
470
92e67b01fc2b00ed6ebcacc67edf6122b4219ac7
https://github.com/pranjukn/AI-Feynman/tree/92e67b01fc2b00ed6ebcacc67edf6122b4219ac7
AlphaChooser
import torch
from torch import nn


class AlphaChooser(torch.nn.Module):
    """
    It manages the alpha values in alpha-entmax function.
    """

    def __init__(self, head_count):
        super(AlphaChooser, self).__init__()
        self.pre_alpha = nn.Parameter(torch.randn(head_count))

    def forward(self):
        alpha = 1 + torch.sigmoid(self.pre_alpha)
        return torch.clamp(alpha, min=1.01, max=2)


def get_inputs():
    return []


def get_init_inputs():
    return [[], {'head_count': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 1.01 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 2.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_sigmoid_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf0, primals_1 class AlphaChooserNew(torch.nn.Module): """ It manages the alpha values in alpha-entmax function. """ def __init__(self, head_count): super(AlphaChooserNew, self).__init__() self.pre_alpha = nn.Parameter(torch.randn(head_count)) def forward(self): primals_1 = self.pre_alpha output = call([primals_1]) return output[0]
prajjwal1/fluence2
AlphaChooser
false
16,276
[ "Apache-2.0" ]
64
f7353f4947ac4712ecd1df34e97df27d83060f13
https://github.com/prajjwal1/fluence2/tree/f7353f4947ac4712ecd1df34e97df27d83060f13
GatedConv
import torch
from torch import nn
import torch.nn.init as init


class GatedConv(nn.Module):
    """GatedConv."""

    def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
        """init."""
        super(GatedConv, self).__init__()
        self.conv = nn.Conv2d(in_channels=input_size, out_channels=2 *
            input_size, kernel_size=(width, 1), stride=(1, 1), padding=(
            width // 2 * (1 - nopad), 0))
        init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout)) ** 0.5)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x_var):
        """forward."""
        x_var = self.dropout(x_var)
        x_var = self.conv(x_var)
        out, gate = x_var.split(int(x_var.size(1) / 2), 1)
        out = out * torch.sigmoid(gate)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (64 + x0 + 128 * x1), xmask) tmp2 = tl.load(in_ptr0 + (x0 + 128 * x1), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp2 * tmp1 tl.store(out_ptr0 + x2, tmp1, xmask) tl.store(out_ptr1 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(512)](buf1, primals_3, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 4, 4, 4 ), (128, 16, 4, 1), 0), buf2 class GatedConvNew(nn.Module): """GatedConv.""" def __init__(self, input_size, width=3, dropout=0.2, nopad=False): """init.""" super(GatedConvNew, self).__init__() self.conv = nn.Conv2d(in_channels=input_size, out_channels=2 * input_size, kernel_size=(width, 1), stride=(1, 1), padding=( width // 2 * (1 - nopad), 0)) init.xavier_uniform_(self.conv.weight, gain=(4 * (1 - dropout)) ** 0.5) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
pppku/SVS_system
GatedConv
false
16,277
[ "Apache-2.0" ]
78
95ef1076c51bfc0b74349b8058a9c918ff24c500
https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
FFN
import torch
from torch import nn
import torch as t


class Conv(nn.Module):
    """Convolution Module."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
        padding=0, dilation=1, bias=True, w_init='linear'):
        """init."""
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        """forward."""
        x = self.conv(x)
        return x


class FFN(nn.Module):
    """Positionwise Feed-Forward Network."""

    def __init__(self, num_hidden):
        """:param num_hidden: dimension of hidden."""
        super(FFN, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init='relu')
        self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
        self.dropout = nn.Dropout(p=0.1)
        self.layer_norm = nn.LayerNorm(num_hidden)

    def forward(self, input_):
        """forward."""
        x = input_.transpose(1, 2)
        x = self.w_2(t.relu(self.w_1(x)))
        x = x.transpose(1, 2)
        x = x + input_
        x = self.layer_norm(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 4), (64, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4, primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf5 del buf6 del primals_7 return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4 class Conv(nn.Module): """Convolution Module.""" def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, w_init='linear'): """init.""" super(Conv, self).__init__() self.conv = nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) nn.init.xavier_uniform_(self.conv.weight, gain=nn.init. 
calculate_gain(w_init)) def forward(self, x): """forward.""" x = self.conv(x) return x class FFNNew(nn.Module): """Positionwise Feed-Forward Network.""" def __init__(self, num_hidden): """:param num_hidden: dimension of hidden.""" super(FFNNew, self).__init__() self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init= 'relu') self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1) self.dropout = nn.Dropout(p=0.1) self.layer_norm = nn.LayerNorm(num_hidden) def forward(self, input_0): primals_2 = self.w_1.conv.weight primals_3 = self.w_1.conv.bias primals_4 = self.w_2.conv.weight primals_5 = self.w_2.conv.bias primals_6 = self.layer_norm.weight primals_7 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
pppku/SVS_system
FFN
false
16,278
[ "Apache-2.0" ]
78
95ef1076c51bfc0b74349b8058a9c918ff24c500
https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
visual_context
import torch
import torch.nn as nn
import torch.utils.data


class visual_context(nn.Module):

    def __init__(self):
        super(visual_context, self).__init__()
        self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))

    def forward(self, visual_feature):
        visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2))
        visual_feature = visual_feature.squeeze(3)
        return visual_feature


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 16), torch.float32) get_raw_stream(0) triton_poi_fused__adaptive_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), class visual_contextNew(nn.Module): def __init__(self): super(visual_contextNew, self).__init__() self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
prabhatrmishra/IDCardInfoExtr
visual_context
false
16,279
[ "Apache-2.0" ]
66
c59270f61a3251a6aff55bc7d81f2057c4663a37
https://github.com/prabhatrmishra/IDCardInfoExtr/tree/c59270f61a3251a6aff55bc7d81f2057c4663a37
DispConv
import torch
import torch.nn as nn


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class DispConv(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(DispConv, self).__init__()
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.sigmod = nn.Sigmoid()

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.sigmod(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, inputs): inputs = self.pad(inputs) out = self.conv(inputs) return out class DispConvNew(nn.Module): def __init__(self, in_channels, out_channels): super(DispConvNew, self).__init__() self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels) self.sigmod = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
prstrive/EPCDepth
DispConv
false
16,280
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
CosineAngularLoss
import torch
import torch.nn as nn
import torch.nn.parallel


class CosineAngularLoss(nn.Module):

    def __init__(self):
        super(CosineAngularLoss, self).__init__()

    def forward(self, preds, truths):
        preds_norm = torch.nn.functional.normalize(preds, p=2, dim=1)
        truths_norm = torch.nn.functional.normalize(truths, p=2, dim=1)
        loss = torch.mean(-torch.sum(preds_norm * truths_norm, dim=1))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x3, xmask) tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) @triton.jit def triton_per_fused_mean_neg_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = -tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 64.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_mean_neg_sum_1[grid(1)](buf2, buf0, 1, 64, XBLOCK= 1, num_warps=2, num_stages=1) del buf0 return buf2, class CosineAngularLossNew(nn.Module): def 
__init__(self): super(CosineAngularLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
princeton-vl/oasis
CosineAngularLoss
false
16,281
[ "BSD-3-Clause" ]
59
5835d24c331d78e91becba29f7e4a53ccd3e376e
https://github.com/princeton-vl/oasis/tree/5835d24c331d78e91becba29f7e4a53ccd3e376e
InfoLoss
import math
import torch
import torch.nn as nn


class InfoLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        x = torch.mean(x, 0)
        logN = math.log(float(x.shape[0]))
        x = x * (x + eps).log() / logN
        neg_entropy = x.sum()
        return 1.0 + neg_entropy


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1e-08 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = 0.7213475204444817 tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = 1.0 tmp19 = tmp17 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_sum_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class InfoLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pudumagico/deepproblog
InfoLoss
false
16,282
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
EntropyLoss
import math
import torch
import torch.nn as nn


class EntropyLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        logN = math.log(float(x.shape[0]))
        x = x * (x + eps).log() / logN
        neg_entropy = x.sum(1)
        return -neg_entropy.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = 1e-08 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 0.7213475204444817 tmp6 = tmp4 * tmp5 tmp8 = tmp7 + tmp1 tmp9 = tl_math.log(tmp8) tmp10 = tmp7 * tmp9 tmp11 = tmp10 * tmp5 tmp12 = tmp6 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 * tmp15 tmp17 = tmp16 * tmp5 tmp18 = tmp12 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tl_math.log(tmp20) tmp22 = tmp19 * tmp21 tmp23 = tmp22 * tmp5 tmp24 = tmp18 + tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tmp30 = -tmp29 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_neg_sum_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class EntropyLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pudumagico/deepproblog
EntropyLoss
false
16,283
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
ConvElu
import torch
import torch.nn as nn


class ConvElu(nn.Module):

    def __init__(self, in_ch=3, out_ch=3, dirate=1):
        super(ConvElu, self).__init__()
        self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate,
            dilation=1 * dirate, padding_mode='reflect')
        self.elu = nn.ELU(inplace=True)

    def forward(self, x):
        hx = x
        xout = self.elu(self.conv_s1(hx))
        return xout


def get_inputs():
    return [torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(432)](primals_1, buf0, 432, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 3, 4, 4), (48, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(192)](buf2, primals_3, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class ConvEluNew(nn.Module): def __init__(self, in_ch=3, out_ch=3, dirate=1): super(ConvEluNew, self).__init__() self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, padding_mode='reflect') self.elu = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv_s1.weight primals_3 = self.conv_s1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
prstrive/EPCDepth
ConvElu
false
16,284
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
JSD
import math
import torch
import torch.nn as nn


class JSD(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, eps=1e-08):
        logN = math.log(float(x.shape[0]))
        y = torch.mean(x, 0)
        y = y * (y + eps).log() / logN
        y = y.sum()
        x = x * (x + eps).log() / logN
        x = x.sum(1).mean()
        return 1.0 - x + y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp7 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp19 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp28 = tl.load(in_ptr0 + r2, None) tmp29 = tl.load(in_ptr0 + (64 + r2), None) tmp31 = tl.load(in_ptr0 + (128 + r2), None) tmp33 = tl.load(in_ptr0 + (192 + r2), None) tmp1 = 1e-08 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = tmp0 * tmp3 tmp5 = 0.7213475204444817 tmp6 = tmp4 * tmp5 tmp8 = tmp7 + tmp1 tmp9 = tl_math.log(tmp8) tmp10 = tmp7 * tmp9 tmp11 = tmp10 * tmp5 tmp12 = tmp6 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tl_math.log(tmp14) tmp16 = tmp13 * tmp15 tmp17 = tmp16 * tmp5 tmp18 = tmp12 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tl_math.log(tmp20) tmp22 = tmp19 * tmp21 tmp23 = tmp22 * tmp5 tmp24 = tmp18 + tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp35 = 4.0 tmp36 = tmp34 / tmp35 tmp37 = tmp36 + tmp1 tmp38 = tl_math.log(tmp37) tmp39 = tmp36 * tmp38 tmp40 = tmp39 * tmp5 tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43 = tl.sum(tmp41, 1)[:, None] tmp44 = 64.0 tmp45 = tmp27 / tmp44 tmp46 = 1.0 tmp47 = tmp46 - tmp45 tmp48 = tmp47 + tmp43 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_log_mean_mul_rsub_sum_0[grid(1)](buf2, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class JSDNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pudumagico/deepproblog
JSD
false
16,285
[ "Apache-2.0" ]
54
6d38e783990551f4030780a1d69c7138fada2020
https://github.com/pudumagico/deepproblog/tree/6d38e783990551f4030780a1d69c7138fada2020
Highway
import torch
from torch import nn


class Highway(nn.Module):

    def __init__(self, in_size, out_size):
        super(Highway, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        H = self.relu(self.H(inputs))
        T = self.sigmoid(self.T(inputs))
        return H * T + inputs * (1.0 - T)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_size': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp4 tmp9 = tmp6 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf0, buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_3, buf0, buf1 class HighwayNew(nn.Module): def __init__(self, in_size, out_size): super(HighwayNew, self).__init__() self.H = nn.Linear(in_size, out_size) self.H.bias.data.zero_() self.T = nn.Linear(in_size, out_size) self.T.bias.data.fill_(-1) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.H.weight primals_2 = self.H.bias primals_4 = self.T.weight primals_5 = self.T.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
puppyapple/tacotron_pytorch
Highway
false
16,286
[ "MIT" ]
278
800bf8b0538c91f1104e99d8e7c1b645bb6154d3
https://github.com/puppyapple/tacotron_pytorch/tree/800bf8b0538c91f1104e99d8e7c1b645bb6154d3
UpConv
import torch
import torch.nn as nn


class Conv3x3(nn.Module):

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        if use_refl:
            self.pad = nn.ReflectionPad2d(1)
        else:
            self.pad = nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, inputs):
        inputs = self.pad(inputs)
        out = self.conv(inputs)
        return out


class UpConv(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(UpConv, self).__init__()
        self.upsamp = nn.UpsamplingNearest2d(scale_factor=2)
        self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels)
        self.elu = nn.ELU(inplace=True)

    def forward(self, inputs):
        out = self.upsamp(inputs)
        out = self.conv(out)
        out = self.elu(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 10 % 10 x0 = xindex % 10 x2 = xindex // 100 x5 = xindex tmp0 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = 7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x5, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch. float32) get_raw_stream(0) triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(1600)](primals_1 , buf0, 1600, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, inputs): inputs = self.pad(inputs) out = self.conv(inputs) return out class UpConvNew(nn.Module): def __init__(self, in_channels, out_channels): super(UpConvNew, self).__init__() self.upsamp = nn.UpsamplingNearest2d(scale_factor=2) self.conv = Conv3x3(in_channels=in_channels, out_channels=out_channels) self.elu = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
prstrive/EPCDepth
UpConv
false
16,287
[ "MIT" ]
76
84119c806741334b652749ee953e3eab60a3718c
https://github.com/prstrive/EPCDepth/tree/84119c806741334b652749ee953e3eab60a3718c
SoftDiceLoss
import torch
from torch.nn.modules.loss import _Loss


class SoftDiceLoss(_Loss):

    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(SoftDiceLoss, self).__init__(size_average, reduce, reduction)

    def forward(self, y_pred, y_gt):
        numerator = torch.sum(y_pred * y_gt)
        denominator = torch.sum(y_pred * y_pred + y_gt * y_gt)
        return numerator / denominator


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tmp0 * tmp0 tmp7 = tmp1 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = tmp5 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class SoftDiceLossNew(_Loss): def __init__(self, size_average=None, reduce=None, reduction='mean'): super(SoftDiceLossNew, self).__init__(size_average, reduce, reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
purbayankar/pytorch-UNet
SoftDiceLoss
false
16,288
[ "MIT" ]
91
63183199b1cf4e23a37869d30fc335e484c0c0fe
https://github.com/purbayankar/pytorch-UNet/tree/63183199b1cf4e23a37869d30fc335e484c0c0fe
Attention
import math import torch from torch import nn from torch.nn import Linear import torch as t from torch.autograd import Variable class MultiheadAttention(nn.Module): """Multihead attention mechanism (dot attention).""" def __init__(self, num_hidden_k): """:param num_hidden_k: dimension of hidden.""" super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None, gaussian_factor=None): """forward.""" attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if gaussian_factor is not None: attn = attn - gaussian_factor if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class Attention(nn.Module): """Attention Network.""" def __init__(self, num_hidden, h=4, local_gaussian=False): """init.""" super(Attention, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.local_gaussian = local_gaussian if local_gaussian: self.local_gaussian_factor = Variable(t.tensor(30.0), requires_grad=True).float() else: self.local_gaussian_factor = None self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, memory, decoder_input, mask=None, query_mask=None): """forward.""" batch_size = memory.size(0) seq_k = memory.size(1) seq_q = decoder_input.size(1) if query_mask is not None: query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k) query_mask = query_mask.repeat(self.h, 1, 1) if mask is not None: mask = mask.repeat(self.h, 1, 1) key = self.key(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) value = self.value(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) query = self.query(decoder_input).view(batch_size, seq_q, self.h, self.num_hidden_per_attn) key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self. num_hidden_per_attn) value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self .num_hidden_per_attn) query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self .num_hidden_per_attn) if self.local_gaussian: row = t.arange(1, seq_k + 1).unsqueeze(0).unsqueeze(-1).repeat( batch_size * self.h, 1, seq_k) col = t.arange(1, seq_k + 1).unsqueeze(0).unsqueeze(0).repeat( batch_size * self.h, seq_k, 1) local_gaussian = t.pow(row - col, 2).float() self.local_gaussian_factor = self.local_gaussian_factor local_gaussian = local_gaussian / self.local_gaussian_factor else: local_gaussian = None result, attns = self.multihead(key, value, query, mask=mask, query_mask=query_mask, gaussian_factor=local_gaussian) attns = attns.view(self.h, batch_size, seq_q, seq_k) attns = attns.permute(1, 0, 2, 3) result = result.view(self.h, batch_size, seq_q, self. 
num_hidden_per_attn) result = result.permute(1, 2, 0, 3).contiguous().view(batch_size, seq_q, -1) result = t.cat([decoder_input, result], dim=-1) result = self.final_linear(result) result = result + decoder_input result = self.layer_norm_1(result) return result, attns class Linear(nn.Module): """Linear Module.""" def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """init.""" super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): """forward.""" return self.linear_layer(x) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn from torch.nn import Linear import torch as t from torch.autograd import Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_9 return buf14, reinterpret_tensor(buf7, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0 ), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) class MultiheadAttention(nn.Module): """Multihead attention mechanism (dot attention).""" def __init__(self, num_hidden_k): """:param num_hidden_k: dimension of hidden.""" super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None, gaussian_factor=None): """forward.""" attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if gaussian_factor is not None: attn = attn - gaussian_factor if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = 
t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class AttentionNew(nn.Module): """Attention Network.""" def __init__(self, num_hidden, h=4, local_gaussian=False): """init.""" super(AttentionNew, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.local_gaussian = local_gaussian if local_gaussian: self.local_gaussian_factor = Variable(t.tensor(30.0), requires_grad=True).float() else: self.local_gaussian_factor = None self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, input_0, input_1): primals_3 = self.key.linear_layer.weight primals_4 = self.value.linear_layer.weight primals_5 = self.query.linear_layer.weight primals_6 = self.final_linear.linear_layer.weight primals_7 = self.final_linear.linear_layer.bias primals_8 = self.layer_norm_1.weight primals_9 = self.layer_norm_1.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1] class Linear(nn.Module): """Linear Module.""" def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """init.""" super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): """forward.""" return self.linear_layer(x)
pppku/SVS_system
Attention
false
16,290
[ "Apache-2.0" ]
78
95ef1076c51bfc0b74349b8058a9c918ff24c500
https://github.com/pppku/SVS_system/tree/95ef1076c51bfc0b74349b8058a9c918ff24c500
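A minimal usage sketch for the generated AttentionNew wrapper in this entry. It assumes the classes and the get_inputs/get_init_inputs helpers above are defined in the current session and that a CUDA device is available (the generated call() path only runs on CUDA); the shapes in the comments are what the traced (4, 4, 4) inputs should yield.

import torch

attn = AttentionNew(num_hidden=4).cuda()
inp_a, inp_b = [x.cuda() for x in get_inputs()]
result, weights = attn(inp_a, inp_b)
print(result.shape)   # expected: torch.Size([4, 4, 4])   (residual + layer-norm output)
print(weights.shape)  # expected: torch.Size([4, 4, 4, 4]) (per-head attention weights)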
BahdanauAttention
import torch from torch import nn class BahdanauAttention(nn.Module): def __init__(self, dim): super(BahdanauAttention, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, query, processed_memory): """ Args: query: (batch, 1, dim) or (batch, dim) processed_memory: (batch, max_time, dim) """ if query.dim() == 2: query = query.unsqueeze(1) processed_query = self.query_layer(query) alignment = self.v(self.tanh(processed_query + processed_memory)) return alignment.squeeze(-1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BahdanauAttentionNew(nn.Module): def __init__(self, dim): super(BahdanauAttentionNew, self).__init__() self.query_layer = nn.Linear(dim, dim, bias=False) self.tanh = nn.Tanh() self.v = nn.Linear(dim, 1, bias=False) def forward(self, input_0, input_1): primals_2 = self.query_layer.weight primals_4 = self.v.weight primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
puppyapple/tacotron_pytorch
BahdanauAttention
false
16,292
[ "MIT" ]
278
800bf8b0538c91f1104e99d8e7c1b645bb6154d3
https://github.com/puppyapple/tacotron_pytorch/tree/800bf8b0538c91f1104e99d8e7c1b645bb6154d3
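A minimal equivalence check (sketch) between the eager BahdanauAttention and the Triton-backed BahdanauAttentionNew above. It assumes both classes and the helpers from this entry are in scope and that a CUDA device is available; since the two wrappers expose the same submodule names, the weights can be shared via load_state_dict.

import torch

ref = BahdanauAttention(dim=4).cuda()
opt = BahdanauAttentionNew(dim=4).cuda()
opt.load_state_dict(ref.state_dict())  # share query_layer / v weights

query, processed_memory = [x.cuda() for x in get_inputs()]
torch.testing.assert_close(opt(query, processed_memory), ref(query, processed_memory))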
ClassWisePool
import sys from torch.autograd import Function import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data class ClassWisePoolFunction(Function): @staticmethod def forward(ctx, input, num_maps): batch_size, num_channels, h, w = input.size() if num_channels % num_maps != 0: None sys.exit(-1) num_outputs = int(num_channels / num_maps) x = input.view(batch_size, num_outputs, num_maps, h, w) output = torch.sum(x, 2) ctx.save_for_backward(input) ctx.num_maps = num_maps return output.view(batch_size, num_outputs, h, w) / num_maps @staticmethod def backward(ctx, grad_output): input, = ctx.saved_tensors batch_size, num_channels, h, w = input.size() num_outputs = grad_output.size(1) grad_input = grad_output.view(batch_size, num_outputs, 1, h, w).expand( batch_size, num_outputs, ctx.num_maps, h, w).contiguous() return grad_input.view(batch_size, num_channels, h, w), None class ClassWisePool(nn.Module): def __init__(self, num_maps): super(ClassWisePool, self).__init__() self.num_maps = num_maps def forward(self, input): return ClassWisePoolFunction.apply(input, self.num_maps) def __repr__(self): return self.__class__.__name__ + ' (num_maps={num_maps})'.format( num_maps=self.num_maps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_maps': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import sys from torch.autograd import Function import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class ClassWisePoolFunction(Function): @staticmethod def forward(ctx, input, num_maps): batch_size, num_channels, h, w = input.size() if num_channels % num_maps != 0: None sys.exit(-1) num_outputs = int(num_channels / num_maps) x = input.view(batch_size, num_outputs, num_maps, h, w) output = torch.sum(x, 2) ctx.save_for_backward(input) ctx.num_maps = num_maps return output.view(batch_size, num_outputs, h, w) / num_maps @staticmethod def backward(ctx, grad_output): input, = ctx.saved_tensors batch_size, num_channels, h, w = input.size() num_outputs = grad_output.size(1) grad_input = grad_output.view(batch_size, num_outputs, 1, h, w).expand( batch_size, num_outputs, ctx.num_maps, h, w).contiguous() return grad_input.view(batch_size, num_channels, h, w), None class ClassWisePoolNew(nn.Module): def __init__(self, num_maps): super(ClassWisePoolNew, self).__init__() self.num_maps = num_maps def __repr__(self): return self.__class__.__name__ + ' (num_maps={num_maps})'.format( num_maps=self.num_maps) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pyushkevich/wildcat.pytorch
ClassWisePool
false
16,293
[ "MIT" ]
273
2046cde4e4a350eb1172fe60035448aa8df632d5
https://github.com/pyushkevich/wildcat.pytorch/tree/2046cde4e4a350eb1172fe60035448aa8df632d5
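For the traced (4, 4, 4, 4) input with num_maps=4 there is a single output map per sample, so the fused kernel above simply sums the four channel maps and scales by 1/4. A plain-PyTorch sketch of the same computation, checked against the eager module (CPU is enough here):

import torch

x = torch.rand(4, 4, 4, 4)
expected = x.view(4, 1, 4, 4, 4).sum(dim=2) / 4          # shape (4, 1, 4, 4)
torch.testing.assert_close(ClassWisePool(num_maps=4)(x), expected)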
BertLayerNorm
import torch import torch.nn as nn import torch.cuda import torch.onnx.utils import torch.random import torch.cuda.random import torch.utils.cpp_extension class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): do_convert = self.weight.dtype != x.dtype if do_convert: x = x u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) x = self.weight * x + self.bias if do_convert: x = x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.cuda import torch.onnx.utils import torch.random import torch.cuda.random import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1e-12 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_2, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_1[grid(256)](primals_1, buf0, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_3 return buf1, primals_2 class BertLayerNormNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
nict-wisdom/rannc
BertLayerNorm
false
16,294
[ "MIT" ]
45
a1708807e053e2d58b7f6d6ed925f03aa8504416
https://github.com/nict-wisdom/rannc/tree/a1708807e053e2d58b7f6d6ed925f03aa8504416
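The reference BertLayerNorm (and the two fused kernels above, which first compute x - mean and then the normalise/scale/shift step) implement a standard layer norm over the last dimension with eps=1e-12; the do_convert branches in the reference are no-ops as written. A sketch checking it against torch.nn.functional.layer_norm, up to floating-point tolerance:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
ln = BertLayerNorm(hidden_size=4)
torch.testing.assert_close(ln(x), F.layer_norm(x, (4,), ln.weight, ln.bias, eps=1e-12))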
ReLUDropout
import torch import torch.utils.data import torch.cuda import torch.utils.checkpoint def relu_dropout(x, p=0, training=False, variational=False, batch_first=False): if not training or p == 0: return x.clamp_(min=0) p1m = 1 - p if variational: if batch_first: mask = torch.rand_like(x[:, 0, :]) > p1m mask = mask.unsqueeze(1).repeat(1, x.size(1), 1) else: mask = torch.rand_like(x[0]) > p1m mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1) else: mask = torch.rand_like(x) > p1m mask |= x < 0 return x.masked_fill_(mask, 0).div_(p1m) class ReLUDropout(torch.nn.Dropout): def __init__(self, p=0.5, variational=False, batch_first=False, inplace =False): super().__init__(p, inplace=True) self.variational = variational self.batch_first = batch_first def forward(self, input): return relu_dropout(input, p=self.p, training=self.training, variational=self.variational, batch_first=self.batch_first) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.cuda import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return arg0_1, def relu_dropout(x, p=0, training=False, variational=False, batch_first=False): if not training or p == 0: return x.clamp_(min=0) p1m = 1 - p if variational: if batch_first: mask = torch.rand_like(x[:, 0, :]) > p1m mask = mask.unsqueeze(1).repeat(1, x.size(1), 1) else: mask = torch.rand_like(x[0]) > p1m mask = mask.unsqueeze(0).repeat(x.size(0), 1, 1) else: mask = torch.rand_like(x) > p1m mask |= x < 0 return x.masked_fill_(mask, 0).div_(p1m) class ReLUDropoutNew(torch.nn.Dropout): def __init__(self, p=0.5, variational=False, batch_first=False, inplace =False): super().__init__(p, inplace=True) self.variational = variational self.batch_first = batch_first def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
quanpn90/NMTGMinor
ReLUDropout
false
16,295
[ "MIT" ]
75
0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
https://github.com/quanpn90/NMTGMinor/tree/0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796
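The traced kernel corresponds to the not-training / p == 0 branch of relu_dropout, i.e. an in-place clamp at zero, so in eval mode the module behaves exactly like a ReLU. A small sketch using the eager module (CPU is fine):

import torch

m = ReLUDropout(p=0.5).eval()
x = torch.rand(4, 4, 4, 4) * 2 - 1                        # mix of positive and negative values
torch.testing.assert_close(m(x.clone()), torch.relu(x))   # clone: the op is in-place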
Invertible1x1Conv
import torch import torch.utils.data from torch import nn class Flow(nn.Module): """ Generic class for flow functions """ def __init__(self): super().__init__() def forward(self, z): """ :param z: input variable, first dimension is batch dim :return: transformed z and log of absolute determinant """ raise NotImplementedError('Forward pass has not been implemented.') def inverse(self, z): raise NotImplementedError('This flow has no algebraic inverse.') class Invertible1x1Conv(Flow): """ Invertible 1x1 convolution introduced in the Glow paper Assumes 4d input/output tensors of the form NCHW """ def __init__(self, num_channels, use_lu=False): """ Constructor :param num_channels: Number of channels of the data :param use_lu: Flag whether to parametrize weights through the LU decomposition """ super().__init__() self.num_channels = num_channels self.use_lu = use_lu Q = torch.qr(torch.randn(self.num_channels, self.num_channels))[0] if use_lu: P, L, U = torch.lu_unpack(*Q.lu()) self.register_buffer('P', P) self.L = nn.Parameter(L) S = U.diag() self.register_buffer('sign_S', torch.sign(S)) self.log_S = nn.Parameter(torch.log(torch.abs(S))) self.U = nn.Parameter(torch.triu(U, diagonal=1)) self.register_buffer('eye', torch.diag(torch.ones(self. num_channels))) else: self.W = nn.Parameter(Q) def _assemble_W(self, inverse=False): L = torch.tril(self.L, diagonal=-1) + self.eye U = torch.triu(self.U, diagonal=1) + torch.diag(self.sign_S * torch .exp(self.log_S)) if inverse: if self.log_S.dtype == torch.float64: L_inv = torch.inverse(L) U_inv = torch.inverse(U) else: L_inv = torch.inverse(L.double()).type(self.log_S.dtype) U_inv = torch.inverse(U.double()).type(self.log_S.dtype) W = U_inv @ L_inv @ self.P.t() else: W = self.P @ L @ U return W def forward(self, z): if self.use_lu: W = self._assemble_W(inverse=True) log_det = -torch.sum(self.log_S) else: W_dtype = self.W.dtype if W_dtype == torch.float64: W = torch.inverse(self.W) else: W = torch.inverse(self.W.double()).type(W_dtype) W = W.view(*W.size(), 1, 1) log_det = -torch.slogdet(self.W)[1] W = W.view(self.num_channels, self.num_channels, 1, 1) z_ = torch.nn.functional.conv2d(z, W) log_det = log_det * z.size(2) * z.size(3) return z_, log_det def inverse(self, z): if self.use_lu: W = self._assemble_W() log_det = torch.sum(self.log_S) else: W = self.W log_det = torch.slogdet(self.W)[1] W = W.view(self.num_channels, self.num_channels, 1, 1) z_ = torch.nn.functional.conv2d(z, W) log_det = log_det * z.size(2) * z.size(3) return z_, log_det def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.float64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__to_copy_convolution_view_1(in_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tmp1 = tmp0.to(tl.float32) tl.store(out_ptr1 + (y0 + 4 * x1), tmp1, xmask & ymask) tl.store(out_ptr2 + (x1 + 4 * y0), tmp1, xmask & ymask) @triton.jit def triton_poi_fused_mul_neg_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -tmp1 tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp3 tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp5, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (1, 4)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float64) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(16)](primals_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = torch.ops.aten.linalg_inv_ex.default(buf0) del buf0 buf2 = buf1[0] del buf1 buf5 = empty_strided_cuda((4, 4, 1, 1), (1, 4, 4, 4), torch.float32) buf11 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__to_copy_convolution_view_1[grid(4, 4)](buf2, buf5, buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf6 = torch.ops.aten._linalg_slogdet.default(primals_1) del primals_1 buf8 = buf6[1] buf9 = buf6[2] buf10 = buf6[3] del buf6 buf12 = extern_kernels.convolution(primals_2, buf11, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1)) del buf11 buf13 = buf8 del buf8 triton_poi_fused_mul_neg_2[grid(1)](buf13, 1, XBLOCK=1, num_warps=1, num_stages=1) return buf12, buf13, primals_2, buf5, buf9, buf10, reinterpret_tensor(buf2, (4, 4), (4, 1), 0) class Flow(nn.Module): """ Generic class for flow functions """ def __init__(self): super().__init__() def forward(self, z): """ :param z: input variable, first dimension is batch dim :return: transformed z and log of absolute determinant """ raise NotImplementedError('Forward pass has not been implemented.') def inverse(self, z): raise NotImplementedError('This flow has no algebraic inverse.') class Invertible1x1ConvNew(Flow): """ Invertible 1x1 
convolution introduced in the Glow paper Assumes 4d input/output tensors of the form NCHW """ def __init__(self, num_channels, use_lu=False): """ Constructor :param num_channels: Number of channels of the data :param use_lu: Flag whether to parametrize weights through the LU decomposition """ super().__init__() self.num_channels = num_channels self.use_lu = use_lu Q = torch.qr(torch.randn(self.num_channels, self.num_channels))[0] if use_lu: P, L, U = torch.lu_unpack(*Q.lu()) self.register_buffer('P', P) self.L = nn.Parameter(L) S = U.diag() self.register_buffer('sign_S', torch.sign(S)) self.log_S = nn.Parameter(torch.log(torch.abs(S))) self.U = nn.Parameter(torch.triu(U, diagonal=1)) self.register_buffer('eye', torch.diag(torch.ones(self. num_channels))) else: self.W = nn.Parameter(Q) def _assemble_W(self, inverse=False): L = torch.tril(self.L, diagonal=-1) + self.eye U = torch.triu(self.U, diagonal=1) + torch.diag(self.sign_S * torch .exp(self.log_S)) if inverse: if self.log_S.dtype == torch.float64: L_inv = torch.inverse(L) U_inv = torch.inverse(U) else: L_inv = torch.inverse(L.double()).type(self.log_S.dtype) U_inv = torch.inverse(U.double()).type(self.log_S.dtype) W = U_inv @ L_inv @ self.P.t() else: W = self.P @ L @ U return W def inverse(self, z): if self.use_lu: W = self._assemble_W() log_det = torch.sum(self.log_S) else: W = self.W log_det = torch.slogdet(self.W)[1] W = W.view(self.num_channels, self.num_channels, 1, 1) z_ = torch.nn.functional.conv2d(z, W) log_det = log_det * z.size(2) * z.size(3) return z_, log_det def forward(self, input_0): primals_1 = self.W primals_2 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
pkulwj1994/normalizing-flows
Invertible1x1Conv
false
16,296
[ "MIT" ]
96
326321c4aea4a3f6ab703f82e21277a79cd7d9e4
https://github.com/pkulwj1994/normalizing-flows/tree/326321c4aea4a3f6ab703f82e21277a79cd7d9e4
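A sketch of the invertibility property the module is built around: forward() convolves with the inverse of W and inverse() convolves with W, so applying one after the other recovers the input and the two log-determinants cancel. This uses only the eager class above, so CPU is fine; tolerances are loose because the inverse is computed in double precision and cast back to float32.

import torch

flow = Invertible1x1Conv(num_channels=4)
x = torch.rand(4, 4, 4, 4)
z, logdet_fwd = flow(x)
x_rec, logdet_inv = flow.inverse(z)
torch.testing.assert_close(x_rec, x, rtol=1e-4, atol=1e-4)
print(float(logdet_fwd + logdet_inv))        # ~0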
AttendNodeModule
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class AttendNodeModule(nn.Module): def forward(self, node_vectors, query): """ Args: node_vectors [Tensor] (num_node, dim_v) : node feature vectors query [Tensor] (dim_v, ) : query vector Returns: attn [Tensor] (num_node, ): node attention by *SOFTMAX*. Actually it is attribute value attention, because we regard attribute values as nodes """ logit = torch.matmul(node_vectors, query) attn = F.softmax(logit, dim=0) return attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 return buf2, class AttendNodeModuleNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
qiuyue1993/XNM-Net
AttendNodeModule
false
16,297
[ "MIT" ]
95
1c4a16fd745d9e90e0d7a08b21e7efca4d2c6195
https://github.com/qiuyue1993/XNM-Net/tree/1c4a16fd745d9e90e0d7a08b21e7efca4d2c6195
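For the use case documented in the docstring (2-D node vectors and a 1-D query) the module is a dot product per node followed by a softmax over the nodes, so the attention weights sum to one. A tiny sketch with hypothetical sizes:

import torch

node_vectors = torch.rand(5, 8)       # (num_node, dim_v)
query = torch.rand(8)                 # (dim_v,)
attn = AttendNodeModule()(node_vectors, query)
print(attn.shape, float(attn.sum()))  # torch.Size([5]), ~1.0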
SmoothL1Loss
import torch import torch.utils.data def smooth_l1_loss(pred, target, weight, beta): val = target - pred abs_val = val.abs() smooth_mask = abs_val < beta return weight * torch.where(smooth_mask, 0.5 / beta * val ** 2, abs_val - 0.5 * beta).sum(dim=-1) class SmoothL1Loss(torch.nn.Module): def __init__(self, beta=1.0 / 9): super(SmoothL1Loss, self).__init__() self.beta = beta def forward(self, input, target, size_average=True): return smooth_l1_loss(input, target, self.beta, size_average) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp2 * tmp2 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp3 - tmp7 tmp10 = tl.where(tmp5, tmp8, tmp9) tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp14 < tmp4 tmp16 = tmp13 * tmp13 tmp17 = tmp16 * tmp7 tmp18 = tmp14 - tmp7 tmp19 = tl.where(tmp15, tmp17, tmp18) tmp20 = tmp10 + tmp19 tmp23 = tmp21 - tmp22 tmp24 = tl_math.abs(tmp23) tmp25 = tmp24 < tmp4 tmp26 = tmp23 * tmp23 tmp27 = tmp26 * tmp7 tmp28 = tmp24 - tmp7 tmp29 = tl.where(tmp25, tmp27, tmp28) tmp30 = tmp20 + tmp29 tmp33 = tmp31 - tmp32 tmp34 = tl_math.abs(tmp33) tmp35 = tmp34 < tmp4 tmp36 = tmp33 * tmp33 tmp37 = tmp36 * tmp7 tmp38 = tmp34 - tmp7 tmp39 = tl.where(tmp35, tmp37, tmp38) tmp40 = tmp30 + tmp39 tmp41 = 0.1111111111111111 tmp42 = tmp40 * tmp41 tl.store(in_out_ptr0 + x0, tmp42, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_abs_lt_mul_pow_sub_sum_where_0[grid(64)](buf1, arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, def smooth_l1_loss(pred, target, weight, beta): val = target - pred abs_val = val.abs() smooth_mask = abs_val < beta return weight * torch.where(smooth_mask, 0.5 / beta * val ** 2, abs_val - 0.5 * beta).sum(dim=-1) class SmoothL1LossNew(torch.nn.Module): def __init__(self, beta=1.0 / 9): super(SmoothL1LossNew, self).__init__() self.beta = beta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
qilei123/FreeAnchor
SmoothL1Loss
false
16,298
[ "MIT" ]
495
80361a7addb7d84a50863a6b34734d28034c7256
https://github.com/qilei123/FreeAnchor/tree/80361a7addb7d84a50863a6b34734d28034c7256
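Note how the arguments land in smooth_l1_loss as written above: the module's beta (1/9) is bound to the weight parameter and size_average=True is bound to beta, which is why the fused kernel compares |target - pred| against 1.0 and scales the per-row sum by 1/9 (the 0.1111111111111111 constant). Reproduced in plain PyTorch as a sketch:

import torch

pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
d = (target - pred).abs()
expected = (1.0 / 9) * torch.where(d < 1.0, 0.5 * d ** 2, d - 0.5).sum(dim=-1)
torch.testing.assert_close(SmoothL1Loss()(pred, target), expected)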
FillUpLuminance
import torch class FillUpLuminance(torch.nn.Module): def __init__(self): super(FillUpLuminance, self).__init__() def forward(self, color, luminance): return color + (1 - color) * luminance def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class FillUpLuminanceNew(torch.nn.Module): def __init__(self): super(FillUpLuminanceNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
qway/nerfmeshes
FillUpLuminance
false
16,299
[ "MIT" ]
113
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
https://github.com/qway/nerfmeshes/tree/d983dcbbcfec1337c9f2040969213c6d1ea0c39e
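The op is a linear interpolation of each colour value towards 1, weighted by the luminance, so it can equivalently be written with torch.lerp. A one-line sketch:

import torch

color, luminance = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
torch.testing.assert_close(FillUpLuminance()(color, luminance),
                           torch.lerp(color, torch.ones_like(color), luminance))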
CmapPafHead
import torch import torch.utils.data import torch.nn import torch.optim class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHead(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHead, self).__init__() if num_upsample > 0: self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0)) self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0)) else: self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): return self.cmap_conv(x), self.paf_conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf1, buf3, primals_1, primals_3, primals_4 class UpsampleCBR(torch.nn.Sequential): def __init__(self, input_channels, output_channels, count=1, num_flat=0): layers = [] for i in range(count): if i == 0: inch = input_channels else: inch = output_channels layers += [torch.nn.ConvTranspose2d(inch, output_channels, kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d( output_channels), torch.nn.ReLU()] for i in range(num_flat): layers += [torch.nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1), torch.nn. 
BatchNorm2d(output_channels), torch.nn.ReLU()] super(UpsampleCBR, self).__init__(*layers) class CmapPafHeadNew(torch.nn.Module): def __init__(self, input_channels, cmap_channels, paf_channels, upsample_channels=256, num_upsample=0, num_flat=0): super(CmapPafHeadNew, self).__init__() if num_upsample > 0: self.cmap_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, cmap_channels, kernel_size=1, stride=1, padding=0)) self.paf_conv = torch.nn.Sequential(UpsampleCBR(input_channels, upsample_channels, num_upsample, num_flat), torch.nn.Conv2d (upsample_channels, paf_channels, kernel_size=1, stride=1, padding=0)) else: self.cmap_conv = torch.nn.Conv2d(input_channels, cmap_channels, kernel_size=1, stride=1, padding=0) self.paf_conv = torch.nn.Conv2d(input_channels, paf_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.cmap_conv.weight primals_2 = self.cmap_conv.bias primals_4 = self.paf_conv.weight primals_5 = self.paf_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
quantd2/trt_pose
CmapPafHead
false
16,300
[ "MIT" ]
738
44c5e826977f20c8dad2d9725313a18cb2189853
https://github.com/quantd2/trt_pose/tree/44c5e826977f20c8dad2d9725313a18cb2189853
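With num_upsample=0 the head is just two independent 1x1 convolutions over the shared input, so copying the weights into the Triton-backed variant should reproduce the eager outputs. A sketch assuming the classes from this entry are in scope and a CUDA device is available (the generated call() is CUDA-only):

import torch

ref = CmapPafHead(4, 4, 4).cuda()
opt = CmapPafHeadNew(4, 4, 4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
for a, b in zip(opt(x), ref(x)):       # (cmap, paf) tuples
    torch.testing.assert_close(a, b)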