| entry_point (string, 1-65 chars) | original_triton_python_code (string, 208-619k chars) | optimised_triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 entries) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
|---|---|---|---|---|---|---|---|---|---|---|
C3
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C3(nn.Module):
def __init__(self):
super(C3, self).__init__()
self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(32, 64,
kernel_size=(3, 3), bias=32)), ('relu3', nn.ReLU())]))
def forward(self, img):
output = self.c3(img)
return output
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 984064
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3844 % 64
x0 = xindex % 3844
x3 = xindex // 3844
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3968 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 62, 62), (246016, 3844, 62, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 64, 62, 62), (253952, 3968, 62, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(984064)](
buf1, primals_2, buf2, 984064, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class C3New(nn.Module):
def __init__(self):
super(C3New, self).__init__()
self.c3 = nn.Sequential(OrderedDict([('c3', nn.Conv2d(32, 64,
kernel_size=(3, 3), bias=32)), ('relu3', nn.ReLU())]))
def forward(self, input_0):
primals_1 = self.c3.c3.weight
primals_2 = self.c3.c3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
devillove084/DeepSignal | C3 | false | 12,260 | ["MIT"] | 0 | 1fe122b32752b11e10ca4bef3d07ddd7de4348b5 | https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
|
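The generated `C3New` above is meant to be a numerical drop-in for the eager `C3` (note that `bias=32` is simply truthy, so `nn.Conv2d` treats it like `bias=True`). A minimal sketch of how such a pair can be checked, assuming a CUDA device and that both classes from this row are importable in one session:

```python
import torch

torch.manual_seed(0)
ref = C3().cuda()                      # eager module from the first column
opt = C3New().cuda()                   # Inductor-compiled module from the second column
opt.load_state_dict(ref.state_dict())  # reuse the same conv weight and bias

x = torch.rand([4, 32, 64, 64], device='cuda')  # same shape as get_inputs()
with torch.no_grad():
    print(torch.allclose(ref(x), opt(x), atol=1e-5))  # expected: True
```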
L2Norm
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class L2Norm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
assert x.dim(
) == 2, 'the input tensor of L2Norm must be the shape of [B, C]'
return F.normalize(x, p=2, dim=-1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class L2NormNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
deokhk/Proxy-Anchor-CVPR2020 | L2Norm | false | 12,261 | ["MIT"] | 0 | acb3a16c3ebc8b8777542898ec83de32aa8ba64e | https://github.com/deokhk/Proxy-Anchor-CVPR2020/tree/acb3a16c3ebc8b8777542898ec83de32aa8ba64e
|
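The fused kernel above divides each row by `max(||row||_2, 1e-12)`, which is exactly the clamped denominator `F.normalize(x, p=2, dim=-1)` uses. A small CPU-only sketch of that identity (no Triton required):

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4)
manual = x / x.norm(p=2, dim=-1, keepdim=True).clamp_min(1e-12)
print(torch.allclose(F.normalize(x, p=2, dim=-1), manual))  # True
```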
MaskedMSE
|
import torch
import torch.nn as nn
class MaskedMSE(nn.Module):
def __init__(self):
super(MaskedMSE, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, input, target, gamma=2.0):
mask = gamma * target / (target + 1e-07)
self.loss = self.criterion(input * mask, target * mask)
return self.loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1e-07
tmp5 = tmp1 + tmp4
tmp6 = tmp3 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tmp1 * tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MaskedMSENew(nn.Module):
def __init__(self):
super(MaskedMSENew, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
dhruvramani/MaskedMSE | MaskedMSE | false | 12,262 | ["MIT"] | 0 | 76ff94add5659217a3f4f21e60a4f069defede29 | https://github.com/dhruvramani/MaskedMSE/tree/76ff94add5659217a3f4f21e60a4f069defede29
|
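In the reduction kernel above, the default `gamma=2.0`, the `1e-07` stabiliser, and the `/ 256.0` mean reduction (4*4*4*4 elements) are all folded in as constants. A sketch of the eager math it reproduces:

```python
import torch
import torch.nn.functional as F

inp, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
mask = 2.0 * tgt / (tgt + 1e-07)          # ~gamma where target > 0, ~0 where target == 0
loss = ((inp * mask - tgt * mask) ** 2).sum() / 256.0
print(torch.allclose(loss, F.mse_loss(inp * mask, tgt * mask)))  # True
```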
C1
|
import torch
import torch.nn as nn
from collections import OrderedDict
class C1(nn.Module):
def __init__(self) ->None:
super(C1, self).__init__()
self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(3, 16,
kernel_size=(3, 3), bias=True)), ('relu1', nn.ReLU()), ('s1',
nn.MaxPool2d(kernel_size=(2, 2), stride=2))]))
def forward(self, img):
output = self.c1(img)
return output
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from collections import OrderedDict
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 246016
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 16
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 31
x1 = xindex // 31 % 31
x2 = xindex // 961
x5 = xindex
x4 = xindex // 15376
x6 = xindex % 15376
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x5, tmp6, xmask)
tl.store(out_ptr1 + (x6 + 15488 * x4), tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(246016)](buf0, primals_2,
buf1, 246016, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16, 31, 31), (15376, 961, 31, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 31, 31), (15488, 961, 31, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(61504)](buf1, buf2,
buf3, 61504, XBLOCK=512, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class C1New(nn.Module):
def __init__(self) ->None:
super(C1New, self).__init__()
self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(3, 16,
kernel_size=(3, 3), bias=True)), ('relu1', nn.ReLU()), ('s1',
nn.MaxPool2d(kernel_size=(2, 2), stride=2))]))
def forward(self, input_0):
primals_1 = self.c1.c1.weight
primals_2 = self.c1.c1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
devillove084/DeepSignal | C1 | false | 12,263 | ["MIT"] | 0 | 1fe122b32752b11e10ca4bef3d07ddd7de4348b5 | https://github.com/devillove084/DeepSignal/tree/1fe122b32752b11e10ca4bef3d07ddd7de4348b5
|
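The buffer shapes asserted in `call()` above follow directly from the layer hyperparameters: a 3x3 convolution with no padding shrinks 64x64 to 62x62, and the 2x2/stride-2 max pool halves that to 31x31 (`buf3` holds the int8 argmax indices kept for the backward pass). A quick eager shape check:

```python
import torch
import torch.nn as nn

x = torch.rand(4, 3, 64, 64)
y = nn.Conv2d(3, 16, kernel_size=3)(x)
print(y.shape)                                          # torch.Size([4, 16, 62, 62])
print(nn.MaxPool2d(kernel_size=2, stride=2)(y).shape)   # torch.Size([4, 16, 31, 31])
```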
ContractingBlock
|
import torch
from torch import nn
class ContractingBlock(nn.Module):
def __init__(self, input_channels, use_bn=True, kernel_size=3,
activation='relu'):
super(ContractingBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels * 2,
kernel_size=kernel_size, padding=1, stride=2, padding_mode=
'reflect')
self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU(
0.2)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels * 2)
self.use_bn = use_bn
def forward(self, x):
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 8
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_3, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 8, 2, 2), (32, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(128)](buf2, primals_2, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
buf4 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32
)
triton_poi_fused__native_batch_norm_legit_2[grid(32)](buf2, buf3,
buf4, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
buf6 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(128)](buf2, buf3,
buf4, buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf4
return buf5, primals_1, buf0, buf2, buf6
class ContractingBlockNew(nn.Module):
def __init__(self, input_channels, use_bn=True, kernel_size=3,
activation='relu'):
super(ContractingBlockNew, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels * 2,
kernel_size=kernel_size, padding=1, stride=2, padding_mode=
'reflect')
self.activation = nn.ReLU() if activation == 'relu' else nn.LeakyReLU(
0.2)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels * 2)
self.use_bn = use_bn
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
diegushko/CycleGAN | ContractingBlock | false | 12,264 | ["MIT"] | 0 | 630d1cd00cef3f09f036d3c734d31c772cc0a786 | https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
|
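Kernel 2 above computes a per-(sample, channel) mean and `rsqrt(var + 1e-05)` over the 2x2 spatial map, i.e. `InstanceNorm2d` without affine parameters, and kernel 3 applies the normalisation plus the ReLU. A CPU sketch of that normalisation step:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 8, 2, 2)                       # shape of buf2 in call()
mean = x.mean(dim=(2, 3), keepdim=True)
var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
manual = (x - mean) * torch.rsqrt(var + 1e-05)
print(torch.allclose(manual, F.instance_norm(x, eps=1e-05), atol=1e-6))  # True
```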
h_swish
|
import torch
import torch.nn as nn
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swish(nn.Module):
def __init__(self, inplace=True):
super(h_swish, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, x):
return x * self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class h_sigmoid(nn.Module):
def __init__(self, inplace=True):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
def forward(self, x):
return self.relu(x + 3) / 6
class h_swishNew(nn.Module):
def __init__(self, inplace=True):
super(h_swishNew, self).__init__()
self.sigmoid = h_sigmoid(inplace=inplace)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dhananjaisharma10/mmdetection | h_swish | false | 12,265 | ["Apache-2.0"] | 0 | 6f6db3211c3760cffe9db2350297c42cc29ce140 | https://github.com/dhananjaisharma10/mmdetection/tree/6f6db3211c3760cffe9db2350297c42cc29ce140
|
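The single fused kernel above evaluates `x * clamp(x + 3, 0, 6) * (1/6)`, with the division by 6 folded into the multiply by `0.16666666666666666`. That is exactly `h_swish`:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
fused = x * torch.clamp(x + 3.0, 0.0, 6.0) * 0.16666666666666666
eager = x * (F.relu6(x + 3) / 6)
print(torch.allclose(fused, eager))  # True
```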
Mlp
|
import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
def gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=gelu, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
def gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class MlpNew(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=gelu, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
denisleonov/pytorch-CycleGAN-and-pix2pix | Mlp | false | 12,266 | ["BSD-3-Clause"] | 0 | d1a5f0c5911f70ed896f826619b4067ce737a83d | https://github.com/denisleonov/pytorch-CycleGAN-and-pix2pix/tree/d1a5f0c5911f70ed896f826619b4067ce737a83d
|
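The `triton_poi_fused_add_div_erf_mul_0` kernel above computes `0.5 * x * (1 + erf(x * 0.7071067811865475))`, where the constant is `1/sqrt(2)`; that is the same erf-based GELU as the `gelu()` helper, sandwiched between the two `addmm` calls for `fc1` and `fc2`. A quick identity check:

```python
import math
import torch

x = torch.rand(4, 4, 4, 4)
kernel_form = x * 0.5 * (1.0 + torch.erf(x * 0.7071067811865475))
reference = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print(torch.allclose(kernel_form, reference))  # True
```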
FeatureMapBlock
|
import torch
from torch import nn
class FeatureMapBlock(nn.Module):
def __init__(self, input_channels, output_channels):
super(FeatureMapBlock, self).__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=
7, padding=3, padding_mode='reflect')
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4, 'output_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10 % 10
x2 = xindex // 100
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-3 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-3 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 10, 10), (400, 100, 10, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1600)](primals_3, buf0,
1600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, buf0
class FeatureMapBlockNew(nn.Module):
def __init__(self, input_channels, output_channels):
super(FeatureMapBlockNew, self).__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=
7, padding=3, padding_mode='reflect')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
diegushko/CycleGAN | FeatureMapBlock | false | 12,267 | ["MIT"] | 0 | 630d1cd00cef3f09f036d3c734d31c772cc0a786 | https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
|
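The `abs()`-based index arithmetic in the padding kernel above is reflection padding; for this row's 4x4 input with `padding=3` it produces the 10x10 `buf0` that is then fed to an unpadded convolution. Eager equivalent of the padding step:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
padded = F.pad(x, (3, 3, 3, 3), mode='reflect')
print(padded.shape)  # torch.Size([4, 4, 10, 10]), matching buf0 in call()
```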
PrecomputedNorm
|
import torch
import torch.nn as nn
class PrecomputedNorm(nn.Module):
"""Normalization using Pre-computed Mean/Std.
Args:
stats: Precomputed (mean, std).
axis: Axis setting used to calculate mean/variance.
"""
def __init__(self, stats, axis=[1, 2]):
super().__init__()
self.axis = axis
self.mean, self.std = stats
def forward(self, X: 'torch.Tensor') ->torch.Tensor:
return (X - self.mean) / self.std
def __repr__(self):
format_string = (self.__class__.__name__ +
f'(mean={self.mean}, std={self.std}, axis={self.axis})')
return format_string
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'stats': [4, 4]}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = 0.25
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PrecomputedNormNew(nn.Module):
"""Normalization using Pre-computed Mean/Std.
Args:
stats: Precomputed (mean, std).
axis: Axis setting used to calculate mean/variance.
"""
def __init__(self, stats, axis=[1, 2]):
super().__init__()
self.axis = axis
self.mean, self.std = stats
def __repr__(self):
format_string = (self.__class__.__name__ +
f'(mean={self.mean}, std={self.std}, axis={self.axis})')
return format_string
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
czlwang/s3prl | PrecomputedNorm | false | 12,268 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
|
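With `stats=[4, 4]` from `get_init_inputs()`, `(X - mean) / std` specialises to `(X - 4.0) * 0.25`; Inductor folds the reciprocal of the std into a multiply, which is what the kernel's constants show. Trivially:

```python
import torch

x = torch.rand(4, 4, 4, 4)
print(torch.allclose((x - 4.0) * 0.25, (x - 4.0) / 4.0))  # True
```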
AMSoftmaxLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AMSoftmaxLoss(nn.Module):
def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs):
"""
AM Softmax Loss
"""
super(AMSoftmaxLoss, self).__init__()
self.s = s
self.m = m
self.speaker_num = speaker_num
self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num),
requires_grad=True)
nn.init.xavier_normal_(self.W, gain=1)
def forward(self, x_BxH, labels_B):
"""
x shape: (B, H)
labels shape: (B)
"""
assert len(x_BxH) == len(labels_B)
assert torch.min(labels_B) >= 0
assert torch.max(labels_B) < self.speaker_num
W = F.normalize(self.W, dim=0)
x_BxH = F.normalize(x_BxH, dim=1)
wf = torch.mm(x_BxH, W)
numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) -
self.m)
excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0
) for i, y in enumerate(labels_B)], dim=0)
denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
excl), dim=1)
L = numerator - torch.log(denominator)
return -torch.mean(L)
def get_inputs():
return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'speaker_num': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp7 = 0.4
tmp8 = tmp6 - tmp7
tmp9 = 30.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_3, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_sub_2[grid(4)](primals_2, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
return buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4
), (1, 4), 0)
class AMSoftmaxLossNew(nn.Module):
def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs):
"""
AM Softmax Loss
"""
super(AMSoftmaxLossNew, self).__init__()
self.s = s
self.m = m
self.speaker_num = speaker_num
self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num),
requires_grad=True)
nn.init.xavier_normal_(self.W, gain=1)
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
|
czlwang/s3prl | AMSoftmaxLoss | false | 12,269 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
|
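The three kernels above normalise `W` column-wise and `x` row-wise, form `wf` with an external matmul, and then compute the numerator `s * (diag(wf^T[labels]) - m)` with `s=30.0` and `m=0.4` baked in as constants; `call()` returns that numerator as its first output. A CPU sketch of the same numerator:

```python
import torch
import torch.nn.functional as F

x = torch.rand(4, 4)
W = torch.randn(4, 4)
labels = torch.ones(4, dtype=torch.int64)
wf = F.normalize(x, dim=1) @ F.normalize(W, dim=0)
numerator = 30.0 * (torch.diagonal(wf.transpose(0, 1)[labels]) - 0.4)
print(numerator.shape)  # torch.Size([4]), matching buf3 in call()
```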
ResidualBlock
|
import torch
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, input_channels):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size=
3, padding=1, padding_mode='reflect')
self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size=
3, padding=1, padding_mode='reflect')
self.instancenorm = nn.InstanceNorm2d(input_channels)
self.activation = nn.ReLU()
def forward(self, x):
original_x = x.clone()
x = self.conv1(x)
x = self.instancenorm(x)
x = self.activation(x)
x = self.conv2(x)
x = self.instancenorm(x)
return original_x + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp2 - tmp12
tmp21 = 16.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp19 + tmp26
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
tl.store(out_ptr3 + x3, tmp25, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf4
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_2[grid(576)](buf2, buf3,
buf6, buf7, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_3[grid(16)](
buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, XBLOCK
=8, num_warps=2, num_stages=1)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9,
reinterpret_tensor(buf13, (16,), (1,), 0), reinterpret_tensor(buf10,
(1, 16, 1, 1), (16, 1, 1, 1), 0))
class ResidualBlockNew(nn.Module):
def __init__(self, input_channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = nn.Conv2d(input_channels, input_channels, kernel_size=
3, padding=1, padding_mode='reflect')
self.conv2 = nn.Conv2d(input_channels, input_channels, kernel_size=
3, padding=1, padding_mode='reflect')
self.instancenorm = nn.InstanceNorm2d(input_channels)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
diegushko/CycleGAN | ResidualBlock | false | 12,271 | ["MIT"] | 0 | 630d1cd00cef3f09f036d3c734d31c772cc0a786 | https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
|
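For reference, the four kernels above reproduce the eager pipeline reflect-pad, conv1, instance norm, ReLU, reflect-pad, conv2, instance norm, residual add, with the final add fused into the last normalisation kernel. A compact eager sketch of that dataflow (random weights, shapes only):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
conv1 = nn.Conv2d(4, 4, kernel_size=3, padding=1, padding_mode='reflect')
conv2 = nn.Conv2d(4, 4, kernel_size=3, padding=1, padding_mode='reflect')
out = x + F.instance_norm(conv2(F.relu(F.instance_norm(conv1(x)))))
print(out.shape)  # torch.Size([4, 4, 4, 4]), matching buf14 in call()
```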
SelfAttentionPooling
|
import torch
import torch.nn as nn
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, batch_rep):
"""
input:
batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
softmax = nn.functional.softmax
att_w = softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
return buf4, primals_3, buf1
class SelfAttentionPoolingNew(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPoolingNew, self).__init__()
self.W = nn.Linear(input_dim, 1)
def forward(self, input_0):
primals_1 = self.W.weight
primals_2 = self.W.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
czlwang/s3prl | SelfAttentionPooling | false | 12,272 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
|
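The two `_softmax` kernels above are the usual numerically stable split: the first subtracts the running max and exponentiates, the second divides by the sum. Because the original module calls `softmax` without a `dim` argument on a 3-D tensor, the reduction runs over the leading dimension, which is what the `x0 = xindex % 16` indexing encodes. A CPU sketch of the identity:

```python
import torch

logits = torch.rand(4, 4, 4)
e = torch.exp(logits - logits.amax(dim=0, keepdim=True))
print(torch.allclose(e / e.sum(dim=0, keepdim=True), torch.softmax(logits, dim=0)))  # True
```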
AP
|
import torch
import torch.nn as nn
class AttentivePooling(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePooling, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (B, T, 1)
return:
utter_rep: size (B, H)
"""
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep, att_w
class AP(nn.Module):
""" Attentive Pooling module incorporating an attention mask"""
def __init__(self, out_dim, input_dim):
super(AP, self).__init__()
self.linear = nn.Linear(input_dim, out_dim)
self.sap_layer = AttentivePooling(out_dim)
self.act_fn = nn.ReLU()
def forward(self, feature_BxTxH, att_mask_BxT):
"""
Arguments
feature_BxTxH - [BxTxH] Acoustic feature with shape
att_mask_BxT - [BxT] Attention Mask logits
"""
feature_BxTxH = self.linear(feature_BxTxH)
sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT)
return sap_vec
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_dim': 4, 'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 64
x3 = xindex // 64
x5 = xindex // 4 % 16
x2 = xindex // 16 % 4
x7 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp8
tmp13 = tmp11 + tmp12
tmp15 = tmp13 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp18 = tmp16 / tmp17
tmp19 = tmp10 * tmp18
tmp20 = tmp9 + tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp24 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp29 = tmp27 / tmp28
tmp30 = tmp21 * tmp29
tmp31 = tmp20 + tmp30
tmp35 = tmp33 + tmp34
tmp37 = tmp35 - tmp36
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = tmp32 * tmp40
tmp42 = tmp31 + tmp41
tl.store(out_ptr0 + x7, tmp42, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2,
primals_5, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_8, buf4, buf5,
buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf5
del buf6
return buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, primals_6, buf8, primals_4
class AttentivePooling(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePooling, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (B, T, 1)
return:
utter_rep: size (B, H)
"""
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep, att_w
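# --- Illustrative sketch (added commentary; not part of the original record) ---
# AttentivePooling collapses a (B, T, H) sequence to (B, H) using mask-biased
# softmax weights over time. A minimal usage example under that shape assumption;
# padded frames should carry a large negative value in the mask so their weight
# vanishes:
def _demo_attentive_pooling():
    import torch
    pool = AttentivePooling(input_dim=4)
    reps = torch.randn(2, 5, 4)            # (B, T, H)
    mask = torch.zeros(2, 5)
    mask[:, 3:] = -1e9                      # ignore the last two frames
    utter_rep, att_w = pool(reps, mask)     # shapes (2, 4) and (2, 5, 1)
    return utter_rep, att_w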
class APNew(nn.Module):
""" Attentive Pooling module incoporate attention mask"""
def __init__(self, out_dim, input_dim):
super(APNew, self).__init__()
self.linear = nn.Linear(input_dim, out_dim)
self.sap_layer = AttentivePooling(out_dim)
self.act_fn = nn.ReLU()
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.sap_layer.W_a.weight
primals_5 = self.sap_layer.W_a.bias
primals_6 = self.sap_layer.W.weight
primals_7 = self.sap_layer.W.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| czlwang/s3prl | AP | false | 12,273 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
BertLayer
|
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
attention_scores = torch.matmul(query, key.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attn_value = torch.matmul(attention_probs, value)
attn_value = attn_value.transpose(1, 2).contiguous()
bs, seq_len = attn_value.shape[:2]
attn_value = attn_value.view(bs, seq_len, -1)
return attn_value
def forward(self, hidden_states, attention_mask):
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
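# --- Added illustration (not part of the original file) ---
# attention() above is plain scaled dot-product attention:
#     softmax(Q @ K^T / sqrt(d_head) + mask) @ V
# A minimal functional restatement, assuming q/k/v are already shaped
# (B, heads, T, d_head) and the additive mask broadcasts to (B, heads, T, T):
def _scaled_dot_product_attention_sketch(query, key, value, attention_mask):
    scores = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(query.size(-1))
    probs = torch.softmax(scores + attention_mask, dim=-1)
    return torch.matmul(probs, value)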
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size
)
self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=
config.layer_norm_eps)
self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
self.interm_dense = nn.Linear(config.hidden_size, config.
intermediate_size)
self.interm_af = F.gelu
self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size
)
self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
proj = dense_layer(output)
proj = dropout(proj)
residual = proj + input
ln = ln_layer(residual)
return ln
def forward(self, hidden_states, attention_mask):
attn_outputs = self.self_attention(hidden_states, attention_mask)
attn_outputs = self.add_norm(hidden_states, attn_outputs, self.
attention_dense, self.attention_dropout, self.attention_layer_norm)
proj = self.interm_dense(attn_outputs)
proj = self.interm_af(proj)
layer_output = self.add_norm(attn_outputs, proj, self.out_dense,
self.out_dropout, self.out_layer_norm)
return layer_output
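# --- Added illustration (not part of the original file) ---
# add_norm() follows the post-LayerNorm transformer pattern: project the sublayer
# output, apply dropout, add the residual input, then normalize. Ignoring dropout,
# it is simply LayerNorm(input + Dense(output)):
def _add_norm_sketch(residual_input, sublayer_output, dense_layer, ln_layer):
    return ln_layer(residual_input + dense_layer(sublayer_output))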
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_attention_heads=4, hidden_size=
4, attention_probs_dropout_prob=0.5, layer_norm_eps=1,
hidden_dropout_prob=0.5, intermediate_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_gelu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1.0
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf1, primals_5, buf10, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_1,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_1,
buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, reinterpret_tensor(buf16, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf17)
del primals_14
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_7[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_8[grid(64)](buf20, primals_16, buf16, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22,
primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf21
del buf22
del primals_18
return buf23, primals_1, primals_11, primals_17, buf9, reinterpret_tensor(
buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1,
4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), buf20, primals_15, primals_13, primals_9
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
attention_scores = torch.matmul(query, key.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attn_value = torch.matmul(attention_probs, value)
attn_value = attn_value.transpose(1, 2).contiguous()
bs, seq_len = attn_value.shape[:2]
attn_value = attn_value.view(bs, seq_len, -1)
return attn_value
def forward(self, hidden_states, attention_mask):
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
class BertLayerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.self_attention = BertSelfAttention(config)
self.attention_dense = nn.Linear(config.hidden_size, config.hidden_size
)
self.attention_layer_norm = nn.LayerNorm(config.hidden_size, eps=
config.layer_norm_eps)
self.attention_dropout = nn.Dropout(config.hidden_dropout_prob)
self.interm_dense = nn.Linear(config.hidden_size, config.
intermediate_size)
self.interm_af = F.gelu
self.out_dense = nn.Linear(config.intermediate_size, config.hidden_size
)
self.out_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.out_dropout = nn.Dropout(config.hidden_dropout_prob)
def add_norm(self, input, output, dense_layer, dropout, ln_layer):
proj = dense_layer(output)
proj = dropout(proj)
residual = proj + input
ln = ln_layer(residual)
return ln
def forward(self, input_0, input_1):
primals_2 = self.self_attention.query.weight
primals_3 = self.self_attention.query.bias
primals_4 = self.self_attention.key.weight
primals_5 = self.self_attention.key.bias
primals_6 = self.self_attention.value.weight
primals_7 = self.self_attention.value.bias
primals_9 = self.attention_dense.weight
primals_10 = self.attention_dense.bias
primals_11 = self.attention_layer_norm.weight
primals_12 = self.attention_layer_norm.bias
primals_13 = self.interm_dense.weight
primals_14 = self.interm_dense.bias
primals_15 = self.out_dense.weight
primals_16 = self.out_dense.bias
primals_17 = self.out_layer_norm.weight
primals_18 = self.out_layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
| brendon-boldt/minbert-assignment | BertLayer | false | 12,274 | ["Apache-2.0"] | 0 | 0b562d791d34a40fd3c0383a0a32b4eeb2171cb5 | https://github.com/brendon-boldt/minbert-assignment/tree/0b562d791d34a40fd3c0383a0a32b4eeb2171cb5 |
AdMSoftmaxLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdMSoftmaxLoss(nn.Module):
def __init__(self, in_features, out_features, s=30.0, m=0.4):
"""
AM Softmax Loss
"""
super(AdMSoftmaxLoss, self).__init__()
self.s = s
self.m = m
self.in_features = in_features
self.out_features = out_features
self.fc = nn.Linear(in_features, out_features, bias=False)
def forward(self, x, labels):
"""
input shape (N, in_features)
"""
assert len(x) == len(labels)
assert torch.min(labels) >= 0
assert torch.max(labels) < self.out_features
        for W in self.fc.parameters():
            # NOTE: this rebinds the loop variable only; the stored fc.weight is
            # never actually normalized (the optimized version below mirrors this).
            W = F.normalize(W, dim=1)
x = F.normalize(x, dim=1)
wf = self.fc(x)
numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) -
self.m)
excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0
) for i, y in enumerate(labels)], dim=0)
denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
excl), dim=1)
L = numerator - torch.log(denominator)
return -torch.mean(L)
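# --- Added note (not part of the original file) ---
# With unit-norm features and (ideally) unit-norm classifier rows, wf[i, y_i] is a
# cosine similarity and the loss above is Additive-Margin Softmax:
#     L_i = -log( exp(s*(cos_yi - m)) / (exp(s*(cos_yi - m)) + sum_{j != y_i} exp(s*cos_j)) )
# An equivalent cross-entropy formulation (illustrative sketch, assuming `cosine`
# is (N, C) and `labels` is (N,) int64):
def _am_softmax_reference(cosine, labels, s=30.0, m=0.4):
    margin = torch.zeros_like(cosine)
    margin[torch.arange(cosine.size(0)), labels] = m
    return F.cross_entropy(s * (cosine - margin), labels)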
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4 + 64 * x1), xmask)
tmp7 = 0.4
tmp8 = tmp6 - tmp7
tmp9 = 30.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32)
triton_poi_fused_mul_sub_1[grid(64)](primals_2, buf1, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class AdMSoftmaxLossNew(nn.Module):
def __init__(self, in_features, out_features, s=30.0, m=0.4):
"""
AM Softmax Loss
"""
super(AdMSoftmaxLossNew, self).__init__()
self.s = s
self.m = m
self.in_features = in_features
self.out_features = out_features
self.fc = nn.Linear(in_features, out_features, bias=False)
def forward(self, input_0, input_1):
primals_3 = self.fc.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| czlwang/s3prl | AdMSoftmaxLoss | false | 12,275 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
SoftmaxLoss
|
import torch
import torch.nn as nn
class SoftmaxLoss(nn.Module):
def __init__(self, hidden_dim, speaker_num, **kwargs):
"""
Softmax Loss
"""
super(SoftmaxLoss, self).__init__()
self.fc = nn.Linear(hidden_dim, speaker_num)
self.loss = nn.CrossEntropyLoss()
def forward(self, x_BxH, labels_B):
"""
x shape: (B, H)
labels shape: (B)
"""
logits_BxSpn = self.fc(x_BxH)
loss = self.loss(logits_BxSpn, labels_B)
return loss
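# --- Added note (not part of the original file) ---
# In PyTorch 1.10+, nn.CrossEntropyLoss also accepts class-probability targets, so
# with the 4-D float targets produced by get_inputs() below the loss becomes the
# mean over positions of -sum_c p_c * log_softmax(logits)_c along dim 1. This is
# also the quantity the fused reduction kernel in the optimized version computes
# (note its 1/64 scaling over the 64 non-class positions).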
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4, 'speaker_num': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1,
primals_4, 1, 256, num_warps=2, num_stages=1)
del buf1
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0
class SoftmaxLossNew(nn.Module):
def __init__(self, hidden_dim, speaker_num, **kwargs):
"""
Softmax Loss
"""
super(SoftmaxLossNew, self).__init__()
self.fc = nn.Linear(hidden_dim, speaker_num)
self.loss = nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| czlwang/s3prl | SoftmaxLoss | false | 12,276 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
CMVN
|
import torch
import torch.nn as nn
class CMVN(nn.Module):
__constants__ = ['mode', 'dim', 'eps']
def __init__(self, mode='global', dim=2, eps=1e-10):
super(CMVN, self).__init__()
if mode != 'global':
raise NotImplementedError(
'Only support global mean variance normalization.')
self.mode = mode
self.dim = dim
self.eps = eps
def forward(self, x):
if self.mode == 'global':
return (x - x.mean(self.dim, keepdim=True)) / (self.eps + x.std
(self.dim, keepdim=True))
def extra_repr(self):
return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-10
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tl.store(out_ptr0 + x3, tmp27, xmask)
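# --- Added note (not part of the original record) ---
# The fused kernel above reproduces CMVN.forward for the (4, 4, 4, 4) test shape:
# for each element it recomputes the mean over dim=2 (the four loads offset by 0,
# 4, 8, 12), the unbiased variance (sum of squared deviations divided by 3), and
# emits (x - mean) / (sqrt(var) + 1e-10), matching
# (x - x.mean(2, keepdim=True)) / (eps + x.std(2, keepdim=True)).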
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CMVNNew(nn.Module):
__constants__ = ['mode', 'dim', 'eps']
def __init__(self, mode='global', dim=2, eps=1e-10):
super(CMVNNew, self).__init__()
if mode != 'global':
raise NotImplementedError(
'Only support global mean variance normalization.')
self.mode = mode
self.dim = dim
self.eps = eps
def extra_repr(self):
return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| czlwang/s3prl | CMVN | false | 12,277 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
ASP
|
import torch
import torch.nn as nn
class AttentivePooling(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePooling, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (B, T, 1)
return:
utter_rep: size (B, H)
"""
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep, att_w
class ASP(nn.Module):
""" Attentive Statistic Pooling module incoporate attention mask"""
def __init__(self, out_dim, input_dim):
super(ASP, self).__init__()
self.linear = nn.Linear(input_dim, out_dim)
self.ap_layer = AttentivePooling(out_dim)
def forward(self, feature_BxTxH, att_mask_BxT):
"""
Arguments
feature_BxTxH - [BxTxH] Acoustic feature with shape
att_mask_BxT - [BxT] Attention Mask logits
"""
feature_BxTxH = self.linear(feature_BxTxH)
sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT)
variance = torch.sqrt(torch.sum(att_w * feature_BxTxH *
feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08)
statistic_pooling = torch.cat([sap_vec, variance], dim=-1)
return statistic_pooling
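# --- Added note (not part of the original file) ---
# Attentive statistics pooling: with attention weights w_t summing to one over
# time, sap_vec is the weighted mean mu = sum_t w_t * h_t and `variance` is in
# fact the weighted standard deviation sqrt(sum_t w_t * h_t**2 - mu**2 + 1e-8),
# so the concatenated utterance vector has size 2 * out_dim. Illustrative
# restatement of that step:
def _attentive_stats_sketch(feats_BxTxH, att_w_BxTx1):
    mu = torch.sum(att_w_BxTx1 * feats_BxTxH, dim=1)
    sigma = torch.sqrt(torch.sum(att_w_BxTx1 * feats_BxTxH * feats_BxTxH, dim=1) - mu ** 2 + 1e-08)
    return torch.cat([mu, sigma], dim=-1)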
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_dim': 4, 'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x5 = xindex // 4 % 64
x7 = xindex // 16
x8 = xindex % 256
x9 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x7, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x9, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x6 = xindex % 64
x3 = xindex // 64
x4 = xindex // 4 % 16
x2 = xindex // 16 % 4
x0 = xindex % 4
x5 = xindex // 4
x8 = xindex
tmp0 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (32 + x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr1 + (48 + x4 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp43 = tl.load(in_ptr5 + (x6 + 256 * x3), xmask)
tmp45 = tl.load(in_ptr5 + (64 + x6 + 256 * x3), xmask)
tmp48 = tl.load(in_ptr5 + (128 + x6 + 256 * x3), xmask)
tmp51 = tl.load(in_ptr5 + (192 + x6 + 256 * x3), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 * tmp8
tmp13 = tmp11 + tmp12
tmp15 = tmp13 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp18 = tmp16 / tmp17
tmp19 = tmp10 * tmp18
tmp20 = tmp9 + tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp24 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp29 = tmp27 / tmp28
tmp30 = tmp21 * tmp29
tmp31 = tmp20 + tmp30
tmp35 = tmp33 + tmp34
tmp37 = tmp35 - tmp36
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp38 / tmp39
tmp41 = tmp32 * tmp40
tmp42 = tmp31 + tmp41
tmp44 = tmp43 * tmp0
tmp46 = tmp45 * tmp10
tmp47 = tmp44 + tmp46
tmp49 = tmp48 * tmp21
tmp50 = tmp47 + tmp49
tmp52 = tmp51 * tmp32
tmp53 = tmp50 + tmp52
tmp54 = tmp42 * tmp42
tmp55 = tmp53 - tmp54
tmp56 = 1e-08
tmp57 = tmp55 + tmp56
tmp58 = libdevice.sqrt(tmp57)
tmp59 = 2.0
tmp60 = tmp58 * tmp59
tmp61 = tmp42 * tmp59
tl.store(out_ptr0 + (x0 + 8 * x5), tmp42, xmask)
tl.store(out_ptr2 + (x0 + 8 * x5), tmp58, xmask)
tl.store(out_ptr3 + x8, tmp60, xmask)
tl.store(out_ptr4 + x8, tmp61, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2,
primals_5, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_mul_2[grid(1024)](primals_8, buf4, buf5, buf6,
buf0, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32
)
buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0)
buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_sqrt_sub_sum_3[grid(256)](buf0,
primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf5
del buf6
return buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4
class AttentivePooling(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePooling, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (B, T, 1)
return:
utter_rep: size (B, H)
"""
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep, att_w
class ASPNew(nn.Module):
""" Attentive Statistic Pooling module incoporate attention mask"""
def __init__(self, out_dim, input_dim):
super(ASPNew, self).__init__()
self.linear = nn.Linear(input_dim, out_dim)
self.ap_layer = AttentivePooling(out_dim)
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_4 = self.ap_layer.W_a.weight
primals_5 = self.ap_layer.W_a.bias
primals_6 = self.ap_layer.W.weight
primals_7 = self.ap_layer.W.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| czlwang/s3prl | ASP | false | 12,278 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
ChannelNorm
|
import torch
import torch.nn as nn
class ChannelNorm(nn.Module):
def __init__(self, numFeatures, epsilon=1e-05, affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
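# --- Added note (not part of the original file) ---
# ChannelNorm normalizes each position independently across the channel axis
# (dim=1) using the unbiased variance (torch.var's default), then applies a
# per-channel affine transform. Unlike BatchNorm or InstanceNorm, no statistics
# are shared across the batch or across positions; everything is computed per
# sample and per position.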
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numFeatures': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 64
x5 = xindex % 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp10 * tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class ChannelNormNew(nn.Module):
def __init__(self, numFeatures, epsilon=1e-05, affine=True):
super(ChannelNormNew, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| czlwang/s3prl | ChannelNorm | false | 12,279 | ["Apache-2.0"] | 0 | 81d4bb8d051cee20fa87c083b8478999e1766172 | https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172 |
Block
|
import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
def gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initialy created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
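# Illustrative sanity check (added for exposition, not part of the original
# repo code): the exact erf-based gelu above closely matches the tanh
# approximation quoted in the docstring.
def _gelu_approximation_check():
    x = torch.linspace(-3.0, 3.0, steps=13)
    exact = gelu(x)
    approx = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))
    # Maximum absolute deviation is on the order of 1e-3 over this range.
    return torch.max(torch.abs(exact - approx))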
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
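# Minimal usage sketch (added for exposition): in training mode with
# drop_prob=0.5, each sample in the batch is either zeroed out entirely or
# rescaled by 1/keep_prob, so the expectation of the output equals the input.
def _drop_path_example():
    x = torch.ones(8, 4)
    out = drop_path(x, drop_prob=0.5, training=True)
    # Every row of `out` is either all 0.0 or all 2.0 (= 1 / keep_prob).
    return out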
class matmul(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x1, x2):
x = x1 @ x2
return x
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=gelu, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.mat = matmul()
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = self.mat(q, k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = self.mat(attn, v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=
gelu, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_erf_mul_10(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16,), (1,))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf3
buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12,
primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12,
primals_6, buf13, buf14, primals_7, primals_8, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_8
buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_10
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_add_div_erf_mul_10[grid(256)](buf16, buf17, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12,
primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4
def gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initialy created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.
device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class matmul(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x1, x2):
x = x1 @ x2
return x
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=gelu, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.mat = matmul()
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = self.mat(q, k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = self.mat(attn, v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class BlockNew(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False,
qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=
gelu, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
act_layer=act_layer, drop=drop)
def forward(self, input_0):
primals_1 = self.norm1.weight
primals_2 = self.norm1.bias
primals_4 = self.attn.qkv.weight
primals_5 = self.attn.proj.weight
primals_6 = self.attn.proj.bias
primals_7 = self.norm2.weight
primals_8 = self.norm2.bias
primals_9 = self.mlp.fc1.weight
primals_10 = self.mlp.fc1.bias
primals_11 = self.mlp.fc2.weight
primals_12 = self.mlp.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
|
denisleonov/pytorch-CycleGAN-and-pix2pix
|
Block
| false
| 12,280
|
[
"BSD-3-Clause"
] | 0
|
d1a5f0c5911f70ed896f826619b4067ce737a83d
|
https://github.com/denisleonov/pytorch-CycleGAN-and-pix2pix/tree/d1a5f0c5911f70ed896f826619b4067ce737a83d
|
AttentivePooling
|
import torch
import torch.nn as nn
class AttentivePooling(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePooling, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (B, T, 1)
        return:
            utter_rep: size (B, H)
"""
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep, att_w
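# Hedged usage sketch (added for exposition, not part of the original code):
# pools a (B, T, H) sequence into a (B, H) utterance vector. The mask is
# additive, so 0 keeps a frame and a large negative value suppresses it.
def _attentive_pooling_example():
    pool = AttentivePooling(input_dim=8)
    batch_rep = torch.randn(2, 5, 8)
    att_mask = torch.zeros(2, 5)
    utter_rep, att_w = pool(batch_rep, att_mask)
    return utter_rep.shape, att_w.shape  # torch.Size([2, 8]), torch.Size([2, 5, 1])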
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp8 = tmp7 + tmp3
tmp9 = tmp6 + tmp8
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 + tmp3
tmp14 = tmp11 + tmp13
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 + tmp3
tmp19 = tmp16 + tmp18
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_1[grid(64)](primals_6, buf2,
primals_5, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_2[grid(256)](primals_6, buf2,
primals_5, buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del buf3
del buf4
del primals_5
del primals_6
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(256)](primals_3, buf5, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0
), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf5, primals_4, buf7
class AttentivePoolingNew(nn.Module):
"""
Implementation of Attentive Pooling
"""
def __init__(self, input_dim, **kwargs):
super(AttentivePoolingNew, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = nn.ReLU()
self.softmax = nn.functional.softmax
def forward(self, input_0, input_1):
primals_1 = self.W_a.weight
primals_2 = self.W_a.bias
primals_4 = self.W.weight
primals_5 = self.W.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
czlwang/s3prl
|
AttentivePooling
| false
| 12,281
|
[
"Apache-2.0"
] | 0
|
81d4bb8d051cee20fa87c083b8478999e1766172
|
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
|
Delta
|
import torch
import torch.nn as nn
from torchaudio import transforms
class Delta(nn.Module):
def __init__(self, order=2, **kwargs):
super(Delta, self).__init__()
self.order = order
self.compute_delta = transforms.ComputeDeltas(**kwargs)
def forward(self, x):
feats = [x]
for o in range(self.order):
feat = feats[-1].transpose(0, 1).unsqueeze(0)
delta = self.compute_delta(feat)
feats.append(delta.squeeze(0).transpose(0, 1))
x = torch.cat(feats, dim=-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torchaudio import transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 //
4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) +
(0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 +
x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x2 = xindex
tmp0 = -2 + x0
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask,
eviction_policy='evict_last')
tmp1 = 0.1
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x4 = xindex // 12
x1 = xindex // 12 % 4
x2 = xindex // 48 % 4
x3 = xindex // 192
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)),
tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 0.1
tmp12 = tmp10 * tmp11
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp9, tmp12, tmp13)
tmp15 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)),
tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp18 * tmp11
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp15, tmp19, tmp20)
tmp22 = tl.where(tmp9, tmp14, tmp21)
tmp23 = tl.where(tmp4, tmp5, tmp22)
tl.store(out_ptr0 + x5, tmp23, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32)
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=
(0,), dilation=(1,), transposed=False, output_padding=(0,),
groups=64, bias=None)
assert_size_stride(buf2, (1, 64, 4), (256, 4, 1))
buf3 = buf0
del buf0
triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = buf1
del buf1
triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding=
(0,), dilation=(1,), transposed=False, output_padding=(0,),
groups=64, bias=None)
assert_size_stride(buf5, (1, 64, 4), (256, 4, 1))
del buf3
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.
float32)
triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf2
del buf5
return buf6,
class DeltaNew(nn.Module):
def __init__(self, order=2, **kwargs):
super(DeltaNew, self).__init__()
self.order = order
self.compute_delta = transforms.ComputeDeltas(**kwargs)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
czlwang/s3prl
|
Delta
| false
| 12,282
|
[
"Apache-2.0"
] | 0
|
81d4bb8d051cee20fa87c083b8478999e1766172
|
https://github.com/czlwang/s3prl/tree/81d4bb8d051cee20fa87c083b8478999e1766172
|
ExpandingBlock
|
import torch
from torch import nn
class ExpandingBlock(nn.Module):
def __init__(self, input_channels, use_bn=True):
super(ExpandingBlock, self).__init__()
self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2,
kernel_size=3, stride=2, padding=1, output_padding=1)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels // 2)
self.use_bn = use_bn
self.activation = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
if self.use_bn:
x = self.instancenorm(x)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 64.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 64 * x3), tmp27, xmask)
tl.store(out_ptr3 + (r2 + 64 * x3), tmp29, xmask)
tl.store(out_ptr4 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 8, 8), (128, 64, 8, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32)
buf6 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.float32)
buf7 = empty_strided_cuda((4, 2, 8, 8), (128, 64, 8, 1), torch.bool)
buf5 = empty_strided_cuda((1, 8, 1, 1), (8, 1, 8, 8), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_0[
grid(8)](buf1, primals_2, buf2, buf6, buf7, buf5, 8, 64, XBLOCK
=8, num_warps=4, num_stages=1)
del primals_2
return buf6, primals_1, primals_3, buf1, reinterpret_tensor(buf5, (8,),
(1,), 0), buf7, reinterpret_tensor(buf2, (1, 8, 1, 1), (8, 1, 1, 1), 0)
class ExpandingBlockNew(nn.Module):
def __init__(self, input_channels, use_bn=True):
super(ExpandingBlockNew, self).__init__()
self.conv1 = nn.ConvTranspose2d(input_channels, input_channels // 2,
kernel_size=3, stride=2, padding=1, output_padding=1)
if use_bn:
self.instancenorm = nn.InstanceNorm2d(input_channels // 2)
self.use_bn = use_bn
self.activation = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
diegushko/CycleGAN
|
ExpandingBlock
| false
| 12,283
|
[
"MIT"
] | 0
|
630d1cd00cef3f09f036d3c734d31c772cc0a786
|
https://github.com/diegushko/CycleGAN/tree/630d1cd00cef3f09f036d3c734d31c772cc0a786
|
FocalLoss
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F
def focal_loss(input_values, gamma):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
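# Worked example (illustrative only): with gamma=0 the modulating factor
# (1 - p) ** gamma is identically 1, so the focal loss collapses to the mean
# cross-entropy; a larger gamma progressively down-weights well-classified
# (high-p) examples.
def _focal_loss_example():
    logits = torch.randn(4, 3)
    target = torch.tensor([0, 2, 1, 1])
    ce = F.cross_entropy(logits, target, reduction='none')
    assert torch.allclose(focal_loss(ce, 0.0), ce.mean())
    return focal_loss(ce, 2.0)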
class FocalLoss(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input, target):
return focal_loss(F.cross_entropy(input, target, reduction='none',
weight=self.weight), self.gamma)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp30 - tmp29
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
def focal_loss(input_values, gamma):
"""Computes the focal loss"""
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
class FocalLossNew(nn.Module):
def __init__(self, weight=None, gamma=0.0):
super(FocalLossNew, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
dixit-dude7/LDAM-DRW
|
FocalLoss
| false
| 12,284
|
[
"MIT"
] | 0
|
6366f4756d3ac0c6b6db784b7f20e16066967ed4
|
https://github.com/dixit-dude7/LDAM-DRW/tree/6366f4756d3ac0c6b6db784b7f20e16066967ed4
|
NormedLinear
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.nn import Parameter
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
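    # Row-wise L2 normalisation of a 4x4 matrix: each element is divided by the
    # eps-clamped Euclidean norm of its row, matching F.normalize(x, dim=1).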
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
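    # Column-wise L2 normalisation of the 4x4 weight matrix (F.normalize(w, dim=0)):
    # x0 indexes the column, and the norm is taken over the 4 rows of that column.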
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
del buf1
return buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class NormedLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinearNew, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
dixit-dude7/LDAM-DRW
|
NormedLinear
| false
| 12,285
|
[
"MIT"
] | 0
|
6366f4756d3ac0c6b6db784b7f20e16066967ed4
|
https://github.com/dixit-dude7/LDAM-DRW/tree/6366f4756d3ac0c6b6db784b7f20e16066967ed4
|
Warp
|
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
def coords_grid(flow: 'Tensor') ->Tensor:
"""Generate shifted coordinate grid based based input flow.
Args:
flow (Tensor): Estimated optical flow.
Returns:
        Tensor: The coordinate grid shifted by the input flow and scaled to the
range [-1, 1].
"""
B, _, H, W = flow.shape
xx = torch.arange(0, W, device=flow.device, requires_grad=False)
yy = torch.arange(0, H, device=flow.device, requires_grad=False)
coords = torch.meshgrid(yy, xx)
coords = torch.stack(coords[::-1], dim=0).float()
grid = coords[None].repeat(B, 1, 1, 1) + flow
grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0
grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0
grid = grid.permute(0, 2, 3, 1)
return grid
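# Illustrative check (added for exposition): with zero flow, coords_grid simply
# returns the pixel coordinates normalised to [-1, 1] in (x, y) order, i.e. the
# identity sampling grid expected by F.grid_sample.
def _coords_grid_example():
    flow = torch.zeros(1, 2, 4, 4)
    grid = coords_grid(flow)  # shape (1, 4, 4, 2)
    # Top-left corner maps to (-1, -1); bottom-right corner maps to (1, 1).
    return grid[0, 0, 0], grid[0, -1, -1]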
class Warp(nn.Module):
"""Warping layer to warp feature using optical flow.
Args:
mode (str): interpolation mode to calculate output values. Options are
'bilinear' and 'nearest'. Defaults to 'bilinear'.
padding_mode (str): padding mode for outside grid values. Options are
            'zeros', 'border' and 'reflection'. Defaults to 'zeros'.
align_corners (bool): If set to True, the extrema (-1 and 1) are
considered as referring to the center points of the input’s corner
pixels. If set to False, they are instead considered as referring
to the corner points of the input’s corner pixels, making the
sampling more resolution agnostic. Default to False.
"""
def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros',
align_corners: 'bool'=False, use_mask: 'bool'=True) ->None:
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.use_mask = use_mask
def forward(self, feat: 'Tensor', flow: 'Tensor') ->Tensor:
"""Forward function for warp.
Args:
feat (Tensor): Input feature
flow (Tensor): Input optical flow.
Returns:
Tensor: The output feature that was generated by warping input
feature based input flow.
"""
grid = coords_grid(flow)
out = F.grid_sample(feat, grid, mode=self.mode, padding_mode=self.
padding_mode, align_corners=self.align_corners)
mask = torch.ones(feat.size(), device=feat.device, requires_grad=False)
if self.use_mask:
mask = F.grid_sample(mask, grid, mode=self.mode, padding_mode=
self.padding_mode, align_corners=self.align_corners)
mask = (mask > 0.9999).float()
return out * mask
def __repr__(self):
s = self.__class__.__name__
s += f'(mode={self.mode}, '
s += f'padding_mode={self.padding_mode}, '
s += f'align_corners={self.align_corners},'
s += f'use_mask={self.use_mask})'
return s
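# Hedged usage sketch (added for exposition, not part of the original module):
# warping with an all-zero flow is an identity mapping up to interpolation
# error, because the sampling grid then coincides with the pixel grid.
def _warp_identity_example():
    warp = Warp(align_corners=True, use_mask=False)
    feat = torch.randn(1, 3, 4, 4)
    flow = torch.zeros(1, 2, 4, 4)
    out = warp(feat, flow)
    return torch.max(torch.abs(out - feat))  # ~0 up to floating point error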
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import Tensor
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_copy_div_mul_repeat_sub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 2
x1 = xindex // 4 % 4
x0 = xindex % 4
x3 = xindex // 32
x5 = xindex % 16
x6 = xindex // 4 % 8
x7 = xindex
tmp19 = tl.load(in_ptr0 + (x5 + 32 * x3), xmask, eviction_policy=
'evict_last')
tmp38 = tl.load(in_ptr0 + x7, xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tl.full([1], 0, tl.int64)
tmp6 = tl.full([1], 4, tl.int64)
tmp7 = tmp3 < tmp6
tmp8 = x0
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 >= tmp6
tl.full([1], 8, tl.int64)
tmp14 = -4 + x1
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp11, tmp14, tmp15)
tmp17 = tl.where(tmp7, tmp10, tmp16)
tmp18 = tmp17.to(tl.float32)
tmp20 = tmp18 + tmp19
tmp21 = 2.0
tmp22 = tmp20 * tmp21
tmp23 = 0.3333333333333333
tmp24 = tmp22 * tmp23
tmp25 = 1.0
tmp26 = tmp24 - tmp25
tmp27 = x6
tmp29 = tmp27 < tmp6
tmp30 = tl.where(tmp29, tmp8, tmp9)
tmp31 = tmp27 >= tmp6
tmp33 = -4 + x1 + 4 * x2
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp31, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp30, tmp35)
tmp37 = tmp36.to(tl.float32)
tmp39 = tmp37 + tmp38
tmp40 = tl.where(tmp2, tmp26, tmp39)
tl.store(out_ptr0 + x7, tmp40, xmask)
@triton.jit
def triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1(in_out_ptr5,
in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
x4 = xindex // 16
tmp3 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp0 = tl.full([1], 0, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = 0.3333333333333333
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp7 - tmp8
tmp11 = tl.where(tmp2, tmp9, tmp10)
tmp12 = tmp11 * tmp4
tmp13 = 1.5
tmp14 = tmp12 + tmp13
tmp15 = libdevice.floor(tmp14)
tmp16 = tmp15 + tmp8
tmp17 = 4.0
tmp18 = tmp16 < tmp17
tmp19 = tmp1 == tmp1
tmp20 = tl.where(tmp19, tmp9, tmp3)
tmp21 = tmp20 * tmp4
tmp22 = tmp21 + tmp13
tmp23 = libdevice.floor(tmp22)
tmp24 = tmp23 + tmp8
tmp25 = 0.0
tmp26 = tmp24 >= tmp25
tmp27 = tmp24 < tmp17
tmp28 = tmp26 & tmp27
tmp29 = tmp18 & tmp28
tmp30 = tmp15 >= tmp25
tmp31 = tmp15 < tmp17
tmp32 = tmp31 & tmp28
tmp33 = tmp30 & tmp32
tmp34 = tmp16 >= tmp25
tmp35 = tmp23 >= tmp25
tmp36 = tmp23 < tmp17
tmp37 = tmp35 & tmp36
tmp38 = tmp18 & tmp37
tmp39 = tmp34 & tmp38
tmp40 = tmp31 & tmp37
tmp41 = tmp30 & tmp40
tmp42 = tmp16 - tmp14
tmp43 = tmp24 - tmp22
tmp44 = tmp42 * tmp43
tmp45 = tl.where(tmp41, tmp44, tmp25)
tmp46 = tmp23.to(tl.int64)
tmp47 = tl.full([1], 0, tl.int64)
tmp48 = tl.where(tmp39, tmp46, tmp47)
tmp49 = tl.full([XBLOCK], 4, tl.int32)
tmp50 = tmp48 + tmp49
tmp51 = tmp48 < 0
tmp52 = tl.where(tmp51, tmp50, tmp48)
tl.device_assert((0 <= tmp52) & (tmp52 < 4) | ~xmask,
'index out of bounds: 0 <= tmp52 < 4')
tmp54 = tmp16.to(tl.int64)
tmp55 = tl.where(tmp39, tmp54, tmp47)
tmp56 = tmp55 + tmp49
tmp57 = tmp55 < 0
tmp58 = tl.where(tmp57, tmp56, tmp55)
tl.device_assert((0 <= tmp58) & (tmp58 < 4) | ~xmask,
'index out of bounds: 0 <= tmp58 < 4')
tmp60 = tmp14 - tmp15
tmp61 = tmp60 * tmp43
tmp62 = tl.where(tmp39, tmp61, tmp25)
tmp63 = tmp8 * tmp62
tmp64 = tmp24.to(tl.int64)
tmp65 = tl.where(tmp33, tmp64, tmp47)
tmp66 = tmp65 + tmp49
tmp67 = tmp65 < 0
tmp68 = tl.where(tmp67, tmp66, tmp65)
tl.device_assert((0 <= tmp68) & (tmp68 < 4) | ~xmask,
'index out of bounds: 0 <= tmp68 < 4')
tmp70 = tmp15.to(tl.int64)
tmp71 = tl.where(tmp33, tmp70, tmp47)
tmp72 = tmp71 + tmp49
tmp73 = tmp71 < 0
tmp74 = tl.where(tmp73, tmp72, tmp71)
tl.device_assert((0 <= tmp74) & (tmp74 < 4) | ~xmask,
'index out of bounds: 0 <= tmp74 < 4')
tmp76 = tmp22 - tmp23
tmp77 = tmp42 * tmp76
tmp78 = tl.where(tmp33, tmp77, tmp25)
tmp79 = tmp8 * tmp78
tmp80 = tmp34 & tmp29
tmp81 = tmp60 * tmp76
tmp82 = tl.where(tmp80, tmp81, tmp25)
tmp83 = tl.where(tmp41, tmp46, tmp47)
tmp84 = tl.where(tmp41, tmp70, tmp47)
tmp85 = tl.where(tmp80, tmp64, tmp47)
tmp86 = tmp83 + tmp49
tmp87 = tmp83 < 0
tmp88 = tl.where(tmp87, tmp86, tmp83)
tl.device_assert((0 <= tmp88) & (tmp88 < 4) | ~xmask,
'index out of bounds: 0 <= tmp88 < 4')
tmp90 = tmp84 + tmp49
tmp91 = tmp84 < 0
tmp92 = tl.where(tmp91, tmp90, tmp84)
tl.device_assert((0 <= tmp92) & (tmp92 < 4) | ~xmask,
'index out of bounds: 0 <= tmp92 < 4')
tmp94 = tmp8 * tmp45
tmp95 = tmp94 + tmp63
tmp96 = tmp95 + tmp79
tmp97 = tmp85 + tmp49
tmp98 = tmp85 < 0
tmp99 = tl.where(tmp98, tmp97, tmp85)
tl.device_assert((0 <= tmp99) & (tmp99 < 4) | ~xmask,
'index out of bounds: 0 <= tmp99 < 4')
tmp101 = tl.where(tmp80, tmp54, tmp47)
tmp102 = tmp101 + tmp49
tmp103 = tmp101 < 0
tmp104 = tl.where(tmp103, tmp102, tmp101)
tl.device_assert((0 <= tmp104) & (tmp104 < 4) | ~xmask,
'index out of bounds: 0 <= tmp104 < 4')
tmp106 = tmp8 * tmp82
tmp107 = tmp96 + tmp106
tmp108 = tl.load(in_ptr1 + (tmp58 + 4 * tmp52 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp109 = tmp108 * tmp62
tmp110 = tl.load(in_ptr1 + (tmp74 + 4 * tmp68 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp111 = tmp110 * tmp78
tmp112 = tl.load(in_ptr1 + (tmp104 + 4 * tmp99 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp113 = tmp112 * tmp82
tmp114 = tl.load(in_ptr1 + (tmp92 + 4 * tmp88 + 16 * x4), xmask,
eviction_policy='evict_last')
tmp115 = tmp114 * tmp45
tmp116 = tmp115 + tmp109
tmp117 = tmp116 + tmp111
tmp118 = tmp117 + tmp113
tmp119 = 0.9999
tmp120 = tmp107 > tmp119
tmp121 = tmp120.to(tl.float32)
tmp122 = tmp118 * tmp121
tl.store(in_out_ptr5 + x3, tmp122, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_copy_div_mul_repeat_sub_0[grid(128)](arg0_1,
buf0, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf12
del buf12
buf27 = buf13
del buf13
triton_poi_fused__to_copy_grid_sampler_2d_gt_mul_ones_1[grid(256)](
buf27, buf0, arg1_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
del buf0
return buf27,
def coords_grid(flow: 'Tensor') ->Tensor:
"""Generate shifted coordinate grid based based input flow.
Args:
flow (Tensor): Estimated optical flow.
Returns:
        Tensor: The coordinates shifted by the input flow and scaled to the
            range [-1, 1].
"""
B, _, H, W = flow.shape
xx = torch.arange(0, W, device=flow.device, requires_grad=False)
yy = torch.arange(0, H, device=flow.device, requires_grad=False)
coords = torch.meshgrid(yy, xx)
coords = torch.stack(coords[::-1], dim=0).float()
grid = coords[None].repeat(B, 1, 1, 1) + flow
grid[:, 0, ...] = grid[:, 0, ...] * 2.0 / max(W - 1, 1) - 1.0
grid[:, 1, ...] = grid[:, 1, ...] * 2.0 / max(H - 1, 1) - 1.0
grid = grid.permute(0, 2, 3, 1)
return grid
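def _demo_coords_grid():
    # Hedged usage sketch added for illustration; it is not part of the original
    # repository and the _demo_coords_grid name is hypothetical. For an all-zero
    # flow, coords_grid returns the base sampling grid scaled into [-1, 1] with
    # shape (B, H, W, 2), ready to be passed to F.grid_sample.
    flow = torch.zeros(1, 2, 4, 4)
    grid = coords_grid(flow)
    assert grid.shape == (1, 4, 4, 2)
    return grid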
class WarpNew(nn.Module):
"""Warping layer to warp feature using optical flow.
Args:
mode (str): interpolation mode to calculate output values. Options are
'bilinear' and 'nearest'. Defaults to 'bilinear'.
padding_mode (str): padding mode for outside grid values. Options are
            'zeros', 'border' and 'reflection'. Defaults to 'zeros'.
align_corners (bool): If set to True, the extrema (-1 and 1) are
considered as referring to the center points of the input’s corner
pixels. If set to False, they are instead considered as referring
to the corner points of the input’s corner pixels, making the
            sampling more resolution agnostic. Defaults to False.
"""
def __init__(self, mode: 'str'='bilinear', padding_mode: 'str'='zeros',
align_corners: 'bool'=False, use_mask: 'bool'=True) ->None:
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.use_mask = use_mask
def __repr__(self):
s = self.__class__.__name__
s += f'(mode={self.mode}, '
s += f'padding_mode={self.padding_mode}, '
s += f'align_corners={self.align_corners},'
s += f'use_mask={self.use_mask})'
return s
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
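def _demo_warp_new():
    # Hedged usage sketch added for illustration; not part of the original repository,
    # and the _demo_warp_new name is hypothetical. The fused call() above is specialised
    # to a CUDA device, a (4, 4, 4, 4) feature map and a (4, 2, 4, 4) flow field, as its
    # assert_size_stride checks show, and returns a warped, mask-weighted feature map.
    feat = torch.rand(4, 4, 4, 4, device='cuda')
    flow = torch.zeros(4, 2, 4, 4, device='cuda')
    warped = WarpNew()(feat, flow)
    assert warped.shape == (4, 4, 4, 4)
    return warped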
|
dimagrshk/opt_flow_attack
|
Warp
| false
| 12,286
|
[
"Apache-2.0"
] | 0
|
6bfad92abcf3eaae1a6ca05b865be072361636ed
|
https://github.com/dimagrshk/opt_flow_attack/tree/6bfad92abcf3eaae1a6ca05b865be072361636ed
|
Normalize
|
import torch
from torch import Tensor
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
import torch.onnx
import torch.optim
import torch.utils.data.distributed
class Normalize(torch.nn.Module):
"""Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.
"""
def __init__(self, mean, std, inplace=False):
super().__init__()
self.mean = mean
self.std = std
self.inplace = inplace
def forward(self, tensor: 'Tensor') ->Tensor:
"""
Args:
tensor (Tensor): Tensor image to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std, self.inplace)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.
mean, self.std)
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'mean': 4, 'std': 4}]
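def _demo_normalize():
    # Hedged usage sketch added for illustration; not part of the original repository,
    # and the _demo_normalize name is hypothetical. It mirrors get_init_inputs/get_inputs
    # above. Note that forward delegates to torch.nn.functional.normalize, so the mean
    # and std constructor arguments are effectively forwarded as its p and dim arguments.
    m = Normalize(mean=4, std=4)
    x = torch.rand(4, 4, 4, 4, 4)
    y = m(x)
    assert y.shape == x.shape
    return y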
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.parallel
import torch.utils.data
import torch.onnx
import torch.optim
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp1 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = 0.25
tmp17 = libdevice.pow(tmp15, tmp16)
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp0 / tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(torch.nn.Module):
"""Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.
"""
def __init__(self, mean, std, inplace=False):
super().__init__()
self.mean = mean
self.std = std
self.inplace = inplace
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.
mean, self.std)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
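def _demo_normalize_new():
    # Hedged usage sketch added for illustration; not part of the original repository,
    # and the _demo_normalize_new name is hypothetical. The fused kernel above is
    # specialised to a CUDA tensor of shape (4, 4, 4, 4, 4), per assert_size_stride,
    # and normalises along the last dimension.
    x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    y = NormalizeNew(mean=4, std=4)(x)
    assert y.shape == x.shape
    return y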
|
dineenai/pytorch_untrained_models
|
Normalize
| false
| 12,287
|
[
"BSD-3-Clause"
] | 0
|
eb301d3b8e3e87b8a79cd8cb4e1cb8d4e44a273a
|
https://github.com/dineenai/pytorch_untrained_models/tree/eb301d3b8e3e87b8a79cd8cb4e1cb8d4e44a273a
|
LowRankResidualDecoderLayer
|
import torch
import torch.nn as nn
import torch.utils.checkpoint
import torch.nn.functional as F
from torch.cuda.amp import autocast
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
@autocast()
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -2 ** 15)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class LowRankResidualMultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False)
self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False)
self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
@autocast()
def forward(self, q, k, v, mask=None):
d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b,
len_q, n_head, d_k)
k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b,
len_k, n_head, d_k)
v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b,
len_v, n_head, d_k)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q))
q += residual
q = self.layer_norm(q)
return q, attn
class LowRankResidualPositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False)
self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False)
self.w_1_res = nn.Linear(d_in, d_hid)
self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False)
self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False)
self.w_2_res = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
@autocast()
def forward(self, x):
residual = x
x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x))
x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x)
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class LowRankResidualDecoderLayer(nn.Module):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(LowRankResidualDecoderLayer, self).__init__()
self.slf_attn = LowRankResidualMultiHeadAttention(n_head, d_model,
d_k, d_v, dropout=dropout)
self.enc_attn = LowRankResidualMultiHeadAttention(n_head, d_model,
d_k, d_v, dropout=dropout)
self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model,
d_inner, dropout=dropout)
@autocast()
def forward(self, dec_input, enc_output, slf_attn_mask=None,
dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(dec_input, dec_input,
dec_input, mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(dec_output, enc_output,
enc_output, mask=dec_enc_attn_mask)
dec_output = self.pos_ffn(dec_output)
return dec_output, dec_slf_attn, dec_enc_attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_inner': 4, 'n_head': 4, 'd_k': 4, 'd_v': 4}]
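def _demo_decoder_layer():
    # Hedged sketch added for illustration; not part of the original repository, and the
    # _demo_decoder_layer name is hypothetical. It mirrors get_init_inputs above: each
    # projection is factored into a low-rank pair (v then u) plus a full-rank residual
    # path, which forward combines as u(v(x)) + res(x).
    layer = LowRankResidualDecoderLayer(d_model=4, d_inner=4, n_head=4, d_k=4, d_v=4)
    n_params = sum(p.numel() for p in layer.parameters())
    return layer, n_params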
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.checkpoint
import torch.nn.functional as F
from torch.cuda.amp import autocast
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused__to_copy_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32)
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + (x2 + 16 * y3), xmask & ymask).to(tl.float32)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32)
tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last').to(tl
.float32)
tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.float32)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.float32)
tmp10 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
).to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp4.to(tl.float32)
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7.to(tl.float32)
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10.to(tl.float32)
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp1 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax__to_copy_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = tmp8.to(tl.float32)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + x4, xmask).to(tl.float32)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask).to(tl
.float32)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_t_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.float32)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused__to_copy_native_layer_norm_11(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tmp8.to(tl.float32)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + x0, xmask).to(tl.float32)
tmp4 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tmp3 + tmp4
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_13(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr0 + x2, xmask).to(tl.float32)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = 0.0
tmp9 = tmp7 <= tmp8
tl.store(in_out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.float32)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.float32)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tmp5.to(tl.float32)
tmp8 = tmp6 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_15(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (16, 4), (4, 1))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (1, 16), (16, 1))
assert_size_stride(primals_12, (4, 1), (1, 1))
assert_size_stride(primals_13, (4, 16), (16, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (16, 4), (4, 1))
assert_size_stride(primals_19, (16, 4), (4, 1))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (16, 4), (4, 1))
assert_size_stride(primals_22, (16, 4), (4, 1))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (16, 4), (4, 1))
assert_size_stride(primals_25, (16, 4), (4, 1))
assert_size_stride(primals_26, (1, 16), (16, 1))
assert_size_stride(primals_27, (4, 1), (1, 1))
assert_size_stride(primals_28, (4, 16), (16, 1))
assert_size_stride(primals_29, (4,), (1,))
assert_size_stride(primals_30, (4,), (1,))
assert_size_stride(primals_31, (1, 4), (4, 1))
assert_size_stride(primals_32, (4, 1), (1, 1))
assert_size_stride(primals_33, (4, 4), (4, 1))
assert_size_stride(primals_34, (4,), (1,))
assert_size_stride(primals_35, (1, 4), (4, 1))
assert_size_stride(primals_36, (4, 1), (1, 1))
assert_size_stride(primals_37, (4, 4), (4, 1))
assert_size_stride(primals_38, (4,), (1,))
assert_size_stride(primals_39, (4,), (1,))
assert_size_stride(primals_40, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(64)](primals_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_1[grid(16)](primals_2, buf1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_3, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(buf2, reinterpret_tensor(buf3, (4, 16), (1, 4), 0
), out=buf4)
buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_4, buf5, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_4
buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf5, (4, 16), (1, 4), 0), out=buf6)
buf7 = buf1
del buf1
triton_poi_fused__to_copy_1[grid(16)](primals_5, buf7, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_5
buf8 = buf5
del buf5
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf7, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_6, buf9, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_6
buf10 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(buf8, reinterpret_tensor(buf9, (4, 16), (1, 4), 0
), out=buf10)
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_7, buf11, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_7
buf12 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf11, (4, 16), (1, 4), 0), out=buf12)
buf13 = buf7
del buf7
triton_poi_fused__to_copy_1[grid(16)](primals_8, buf13, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_8
buf14 = buf11
del buf11
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf13, (4, 4), (1, 4), 0), out=buf14)
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_9, buf15, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_9
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(buf14, reinterpret_tensor(buf15, (4, 16), (1, 4),
0), out=buf16)
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_10, buf17, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_10
buf18 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(buf17, (4, 16), (1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16)
triton_poi_fused_clone_div_2[grid(256)](buf4, buf6, buf19, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_3[grid(16, 16)](buf10, buf12, buf20, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf21 = reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0)
del buf12
extern_kernels.bmm(reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf20, (16, 4, 4), (16, 4, 1), 0), out=buf21
)
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf21, buf22, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf25 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf21
triton_poi_fused__softmax__to_copy_5[grid(256)](buf22, buf23, buf25,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused_clone_6[grid(256)](buf16, buf18, buf24, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf26 = reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0)
del buf18
extern_kernels.bmm(reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf24, (16, 4, 4), (16, 4, 1), 0), out=buf26
)
buf27 = reinterpret_tensor(buf13, (16, 1), (1, 16), 0)
del buf13
triton_poi_fused__to_copy_1[grid(16)](primals_11, buf27, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_11
buf28 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf16
triton_poi_fused_clone_7[grid(256)](buf26, buf28, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf29 = empty_strided_cuda((16, 1), (1, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0),
buf27, out=buf29)
buf30 = empty_strided_cuda((1, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_12, buf30, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_12
buf31 = buf17
del buf17
extern_kernels.mm(buf29, buf30, out=buf31)
buf32 = empty_strided_cuda((16, 4), (1, 16), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_13, buf32, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_13
buf33 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf28, (16, 16), (16, 1), 0),
buf32, out=buf33)
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_9[grid(64)](buf31, buf33, primals_1, buf34, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf35 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf36 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(16)](buf34, buf35, buf36,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf39 = reinterpret_tensor(buf33, (4, 4, 4), (16, 4, 1), 0)
del buf33
triton_poi_fused__to_copy_native_layer_norm_11[grid(64)](buf34,
buf35, buf36, primals_14, primals_15, buf37, buf39, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_15
buf38 = empty_strided_cuda((4, 4), (1, 4), torch.float16)
triton_poi_fused__to_copy_1[grid(16)](primals_17, buf38, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_17
buf40 = buf31
del buf31
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
buf38, out=buf40)
buf41 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_18, buf41, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_18
buf42 = reinterpret_tensor(buf26, (16, 16), (16, 1), 0)
del buf26
extern_kernels.mm(buf40, reinterpret_tensor(buf41, (4, 16), (1, 4),
0), out=buf42)
buf43 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_19, buf43, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_19
buf44 = buf4
del buf4
extern_kernels.mm(reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
reinterpret_tensor(buf43, (4, 16), (1, 4), 0), out=buf44)
buf45 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_16, buf45, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_16
buf46 = empty_strided_cuda((4, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_1[grid(16)](primals_20, buf46, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_20
buf47 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0),
reinterpret_tensor(buf46, (4, 4), (1, 4), 0), out=buf47)
buf48 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_21, buf48, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_21
buf49 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(buf47, reinterpret_tensor(buf48, (4, 16), (1, 4),
0), out=buf49)
buf50 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_22, buf50, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_22
buf51 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0),
reinterpret_tensor(buf50, (4, 16), (1, 4), 0), out=buf51)
buf52 = buf46
del buf46
triton_poi_fused__to_copy_1[grid(16)](primals_23, buf52, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_23
buf53 = buf50
del buf50
extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0),
reinterpret_tensor(buf52, (4, 4), (1, 4), 0), out=buf53)
buf54 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_24, buf54, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_24
buf55 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(buf53, reinterpret_tensor(buf54, (4, 16), (1, 4),
0), out=buf55)
buf56 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_25, buf56, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_25
buf57 = empty_strided_cuda((16, 16), (16, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf45, (16, 4), (4, 1), 0),
reinterpret_tensor(buf56, (4, 16), (1, 4), 0), out=buf57)
buf58 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float16)
triton_poi_fused_clone_div_2[grid(256)](buf42, buf44, buf58, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf42
buf59 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf44
triton_poi_fused_clone_3[grid(16, 16)](buf49, buf51, buf59, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
buf60 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0)
del buf51
extern_kernels.bmm(reinterpret_tensor(buf58, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60
)
buf61 = buf22
del buf22
triton_poi_fused__softmax_4[grid(256)](buf60, buf61, 256, XBLOCK=
128, num_warps=4, num_stages=1)
buf62 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf64 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf60
triton_poi_fused__softmax__to_copy_5[grid(256)](buf61, buf62, buf64,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf61
buf63 = reinterpret_tensor(buf49, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf49
triton_poi_fused_clone_6[grid(256)](buf55, buf57, buf63, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf65 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0)
del buf57
extern_kernels.bmm(reinterpret_tensor(buf64, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf65
)
buf66 = reinterpret_tensor(buf52, (16, 1), (1, 16), 0)
del buf52
triton_poi_fused__to_copy_1[grid(16)](primals_26, buf66, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_26
buf67 = reinterpret_tensor(buf55, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf55
triton_poi_fused_clone_7[grid(256)](buf65, buf67, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf65
buf68 = empty_strided_cuda((16, 1), (1, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0),
buf66, out=buf68)
buf69 = empty_strided_cuda((1, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_27, buf69, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_27
buf70 = buf56
del buf56
extern_kernels.mm(buf68, buf69, out=buf70)
buf71 = empty_strided_cuda((16, 4), (1, 16), torch.float16)
triton_poi_fused__to_copy_0[grid(64)](primals_28, buf71, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_28
buf72 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf67, (16, 16), (16, 1), 0),
buf71, out=buf72)
buf73 = buf37
del buf37
triton_poi_fused_add_12[grid(64)](buf73, buf70, buf72, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf74 = buf36
del buf36
buf75 = buf35
del buf35
triton_poi_fused_native_layer_norm_10[grid(16)](buf73, buf74, buf75,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf76 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf78 = reinterpret_tensor(buf72, (4, 4, 4), (16, 4, 1), 0)
del buf72
triton_poi_fused__to_copy_native_layer_norm_11[grid(64)](buf73,
buf74, buf75, primals_29, primals_30, buf76, buf78, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_30
buf77 = empty_strided_cuda((4, 1), (1, 4), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_31, buf77, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_31
buf79 = empty_strided_cuda((16, 1), (1, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0),
buf77, out=buf79)
buf80 = empty_strided_cuda((1, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_32, buf80, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_32
buf81 = buf70
del buf70
extern_kernels.mm(buf79, buf80, out=buf81)
buf82 = empty_strided_cuda((4, 4), (1, 4), torch.float16)
triton_poi_fused__to_copy_1[grid(16)](primals_33, buf82, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_33
buf83 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf78, (16, 4), (4, 1), 0),
buf82, out=buf83)
buf84 = empty_strided_cuda((4, 1), (1, 4), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_35, buf84, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_35
buf85 = reinterpret_tensor(buf81, (4, 4, 4), (16, 4, 1), 0)
del buf81
buf95 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_13[grid(64)](buf85,
buf83, primals_34, buf95, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_34
buf86 = empty_strided_cuda((16, 1), (1, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0),
buf84, out=buf86)
buf87 = empty_strided_cuda((1, 4), (4, 1), torch.float16)
triton_poi_fused__to_copy_t_8[grid(4)](primals_36, buf87, 4, XBLOCK
=4, num_warps=1, num_stages=1)
del primals_36
buf88 = buf83
del buf83
extern_kernels.mm(buf86, buf87, out=buf88)
buf89 = empty_strided_cuda((4, 4), (1, 4), torch.float16)
triton_poi_fused__to_copy_1[grid(16)](primals_37, buf89, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_37
buf90 = empty_strided_cuda((16, 4), (4, 1), torch.float16)
extern_kernels.mm(reinterpret_tensor(buf85, (16, 4), (4, 1), 0),
buf89, out=buf90)
buf91 = buf76
del buf76
triton_poi_fused_add_14[grid(64)](buf91, buf88, buf90, primals_38,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf88
del buf90
del primals_38
buf92 = buf75
del buf75
buf93 = buf74
del buf74
triton_poi_fused_native_layer_norm_10[grid(16)](buf91, buf92, buf93,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf94 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_15[grid(64)](buf91, buf92, buf93,
primals_39, primals_40, buf94, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf92
del buf93
del primals_40
return (buf94, buf23, buf62, primals_14, primals_29, primals_39,
reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf3, (4, 16), (1, 4), 0), buf2, reinterpret_tensor(buf9, (4, 16),
(1, 4), 0), buf8, reinterpret_tensor(buf15, (4, 16), (1, 4), 0),
buf14, buf23, reinterpret_tensor(buf28, (16, 16), (16, 1), 0),
buf29, buf34, reinterpret_tensor(buf39, (16, 4), (4, 1), 0),
reinterpret_tensor(buf41, (4, 16), (1, 4), 0), buf40,
reinterpret_tensor(buf43, (4, 16), (1, 4), 0), reinterpret_tensor(
buf45, (16, 4), (4, 1), 0), reinterpret_tensor(buf48, (4, 16), (1,
4), 0), buf47, reinterpret_tensor(buf54, (4, 16), (1, 4), 0), buf53,
buf62, reinterpret_tensor(buf67, (16, 16), (16, 1), 0), buf68,
buf73, reinterpret_tensor(buf78, (16, 4), (4, 1), 0), buf79,
reinterpret_tensor(buf85, (16, 4), (4, 1), 0), buf86, buf91,
reinterpret_tensor(buf89, (4, 4), (4, 1), 0), reinterpret_tensor(
buf87, (4, 1), (1, 1), 0), reinterpret_tensor(buf84, (1, 4), (4, 1),
0), buf95, reinterpret_tensor(buf82, (4, 4), (4, 1), 0),
reinterpret_tensor(buf80, (4, 1), (1, 1), 0), reinterpret_tensor(
buf77, (1, 4), (4, 1), 0), reinterpret_tensor(buf71, (4, 16), (16,
1), 0), reinterpret_tensor(buf69, (4, 1), (1, 1), 0),
reinterpret_tensor(buf66, (1, 16), (16, 1), 0), reinterpret_tensor(
buf64, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf63, (16, 4,
4), (16, 1, 4), 0), reinterpret_tensor(buf58, (16, 4, 4), (16, 1, 4
), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf38, (4, 4), (4, 1), 0), reinterpret_tensor(
buf32, (4, 16), (16, 1), 0), reinterpret_tensor(buf30, (4, 1), (1,
1), 0), reinterpret_tensor(buf27, (1, 16), (16, 1), 0),
reinterpret_tensor(buf25, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf24, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf19, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf20, (16, 4, 4), (16, 1, 4), 0))
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
@autocast()
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -2 ** 15)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class LowRankResidualMultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_qs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_qs_res = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_ks_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_ks_res = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs_u = nn.Linear(int(n_head * d_k / 4), n_head * d_k, bias=False
)
self.w_vs_v = nn.Linear(d_model, int(n_head * d_k / 4), bias=False)
self.w_vs_res = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc_u = nn.Linear(int(d_model / 4), d_model, bias=False)
self.fc_v = nn.Linear(n_head * d_v, int(d_model / 4), bias=False)
self.fc_res = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
@autocast()
def forward(self, q, k, v, mask=None):
d_k, _d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
q = (self.w_qs_u(self.w_qs_v(q)) + self.w_qs_res(q)).view(sz_b,
len_q, n_head, d_k)
k = (self.w_ks_u(self.w_ks_v(k)) + self.w_ks_res(k)).view(sz_b,
len_k, n_head, d_k)
v = (self.w_vs_u(self.w_vs_v(v)) + self.w_vs_res(v)).view(sz_b,
len_v, n_head, d_k)
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
q, attn = self.attention(q, k, v, mask=mask)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc_u(self.fc_v(q)) + self.fc_res(q))
q += residual
q = self.layer_norm(q)
return q, attn
class LowRankResidualPositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1_u = nn.Linear(int(d_in / 4), d_hid, bias=False)
self.w_1_v = nn.Linear(d_in, int(d_in / 4), bias=False)
self.w_1_res = nn.Linear(d_in, d_hid)
self.w_2_u = nn.Linear(int(d_in / 4), d_in, bias=False)
self.w_2_v = nn.Linear(d_hid, int(d_in / 4), bias=False)
self.w_2_res = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
@autocast()
def forward(self, x):
residual = x
x = F.relu(self.w_1_u(self.w_1_v(x)) + self.w_1_res(x))
x = self.w_2_u(self.w_2_v(x)) + self.w_2_res(x)
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
class LowRankResidualDecoderLayerNew(nn.Module):
""" Compose with three layers """
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(LowRankResidualDecoderLayerNew, self).__init__()
self.slf_attn = LowRankResidualMultiHeadAttention(n_head, d_model,
d_k, d_v, dropout=dropout)
self.enc_attn = LowRankResidualMultiHeadAttention(n_head, d_model,
d_k, d_v, dropout=dropout)
self.pos_ffn = LowRankResidualPositionwiseFeedForward(d_model,
d_inner, dropout=dropout)
def forward(self, input_0, input_1):
primals_3 = self.slf_attn.w_qs_u.weight
primals_2 = self.slf_attn.w_qs_v.weight
primals_4 = self.slf_attn.w_qs_res.weight
primals_6 = self.slf_attn.w_ks_u.weight
primals_5 = self.slf_attn.w_ks_v.weight
primals_7 = self.slf_attn.w_ks_res.weight
primals_9 = self.slf_attn.w_vs_u.weight
primals_8 = self.slf_attn.w_vs_v.weight
primals_10 = self.slf_attn.w_vs_res.weight
primals_12 = self.slf_attn.fc_u.weight
primals_11 = self.slf_attn.fc_v.weight
primals_13 = self.slf_attn.fc_res.weight
primals_14 = self.slf_attn.layer_norm.weight
primals_15 = self.slf_attn.layer_norm.bias
primals_18 = self.enc_attn.w_qs_u.weight
primals_17 = self.enc_attn.w_qs_v.weight
primals_19 = self.enc_attn.w_qs_res.weight
primals_21 = self.enc_attn.w_ks_u.weight
primals_20 = self.enc_attn.w_ks_v.weight
primals_22 = self.enc_attn.w_ks_res.weight
primals_24 = self.enc_attn.w_vs_u.weight
primals_23 = self.enc_attn.w_vs_v.weight
primals_25 = self.enc_attn.w_vs_res.weight
primals_27 = self.enc_attn.fc_u.weight
primals_26 = self.enc_attn.fc_v.weight
primals_28 = self.enc_attn.fc_res.weight
primals_29 = self.enc_attn.layer_norm.weight
primals_30 = self.enc_attn.layer_norm.bias
primals_32 = self.pos_ffn.w_1_u.weight
primals_31 = self.pos_ffn.w_1_v.weight
primals_33 = self.pos_ffn.w_1_res.weight
primals_34 = self.pos_ffn.w_1_res.bias
primals_36 = self.pos_ffn.w_2_u.weight
primals_35 = self.pos_ffn.w_2_v.weight
primals_37 = self.pos_ffn.w_2_res.weight
primals_38 = self.pos_ffn.w_2_res.bias
primals_39 = self.pos_ffn.layer_norm.weight
primals_40 = self.pos_ffn.layer_norm.bias
primals_1 = input_0
primals_16 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40])
return output[0], output[1], output[2]
|
bahducoup/factorized_training
|
LowRankResidualDecoderLayer
| false
| 12,288
|
[
"MIT"
] | 0
|
0af38f16338a9bcfcc11091b1a6b75befd67f234
|
https://github.com/bahducoup/factorized_training/tree/0af38f16338a9bcfcc11091b1a6b75befd67f234
|
SelfAttention
|
import torch
import torch.nn.functional as F
from torch import nn
class SelfAttention(nn.Module):
def __init__(self, embedding_dimension, num_heads):
super().__init__()
assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}'
self.num_heads = num_heads
k = embedding_dimension
self.to_keys = nn.Linear(k, k * num_heads, bias=False)
self.to_queries = nn.Linear(k, k * num_heads, bias=False)
self.to_values = nn.Linear(k, k * num_heads, bias=False)
self.unify_heads = nn.Linear(num_heads * k, k)
def forward(self, x):
b, t, k = x.size()
h = self.num_heads
keys = self.to_keys(x).view(b, t, h, k)
queries = self.to_queries(x).view(b, t, h, k)
values = self.to_values(x).view(b, t, h, k)
keys = keys.transpose(1, 2).contiguous().view(b * h, t, k)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, k)
values = values.transpose(1, 2).contiguous().view(b * h, t, k)
dot = torch.bmm(queries, keys.transpose(1, 2))
dot = dot / k ** (1 / 2)
dot = F.softmax(dot, dim=2)
out = torch.bmm(dot, values).view(b, h, t, k)
out = out.transpose(1, 2).contiguous().view(b, t, h * k)
return self.unify_heads(out)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embedding_dimension': 4, 'num_heads': 4}]
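def _demo_self_attention():
    # Hedged usage sketch added for illustration; not part of the original repository,
    # and the _demo_self_attention name is hypothetical. It mirrors
    # get_init_inputs/get_inputs above: the module preserves the (batch, time,
    # embedding) shape of its input.
    attn = SelfAttention(embedding_dimension=4, num_heads=4)
    x = torch.rand(4, 4, 4)
    y = attn(x)
    assert y.shape == x.shape
    return y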
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](buf1, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(256)](buf0, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_clone_0[grid(256)](buf2, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16,
4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_0[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf11)
del primals_6
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 16), (16, 1), 0
), primals_5, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
class SelfAttentionNew(nn.Module):
def __init__(self, embedding_dimension, num_heads):
super().__init__()
assert embedding_dimension % num_heads == 0, f'embedding dimension must be divisible by number of heads, got embedding_dimension={embedding_dimension!r}, num_heads={num_heads!r}'
self.num_heads = num_heads
k = embedding_dimension
self.to_keys = nn.Linear(k, k * num_heads, bias=False)
self.to_queries = nn.Linear(k, k * num_heads, bias=False)
self.to_values = nn.Linear(k, k * num_heads, bias=False)
self.unify_heads = nn.Linear(num_heads * k, k)
def forward(self, input_0):
primals_2 = self.to_keys.weight
primals_3 = self.to_queries.weight
primals_4 = self.to_values.weight
primals_5 = self.unify_heads.weight
primals_6 = self.unify_heads.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
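# --- Usage sketch for the compiled wrapper above (added for illustration; the
# shapes are assumptions read off the assert_size_stride guards in call():
# embedding_dimension=4, num_heads=4, and a contiguous [4, 4, 4] CUDA input.
# The inductor-generated path is shape-specialised, so other sizes would need a
# fresh compilation). ---
if __name__ == '__main__' and torch.cuda.is_available():
    attn = SelfAttentionNew(embedding_dimension=4, num_heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')  # [batch, seq_len, embedding_dim]
    out = attn(x)
    print(out.shape)  # torch.Size([4, 4, 4])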
|
dimitrios-ebi/gene_symbol_classifier
|
SelfAttention
| false
| 12,289
|
[
"Apache-2.0"
] | 0
|
fe415f719fda4619041b9fe0639996c92e0f12a8
|
https://github.com/dimitrios-ebi/gene_symbol_classifier/tree/fe415f719fda4619041b9fe0639996c92e0f12a8
|
MHAttentionMap
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch._utils
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads=1, dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k):
q = self.q_linear(q)
k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1),
self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim //
self.num_heads)
kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.
num_heads, k.shape[-2], k.shape[-1])
weights = torch.einsum('bqnc,bnchw->bqnhw', qh * self.
normalize_fact, kh)
weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
weights = self.dropout(weights)
return weights
def get_inputs():
return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'query_dim': 4, 'hidden_dim': 4}]
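# --- Usage sketch (illustrative; query_dim=hidden_dim=4 with a single head and
# the tensor shapes below simply mirror get_init_inputs()/get_inputs() above --
# they are benchmark assumptions, not requirements of the module). ---
if __name__ == '__main__':
    attn_map = MHAttentionMap(query_dim=4, hidden_dim=4, num_heads=1)
    q = torch.rand(4, 4, 1, 4)     # query embeddings
    feat = torch.rand(4, 4, 4, 4)  # [B, C, H, W] feature map
    w = attn_map(q, feat)          # attention weights, shape [4, 4, 1, 4, 4]
    print(w.shape)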
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
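    # Single-pass row softmax: for each of the 16 rows, take the row max,
    # exponentiate the shifted values, and normalise by the row sum over the
    # 16 elements handled by one program.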
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = extern_kernels.convolution(primals_6, reinterpret_tensor(
primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0), stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf2, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf3 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(16)](buf4, buf7, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
return reinterpret_tensor(buf7, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0
), primals_6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (4, 4, 1, 1), (4, 1, 1, 1), 0
), buf7, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 16, 4), (64, 1, 16), 0)
class MHAttentionMapNew(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self, query_dim, hidden_dim, num_heads=1, dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, input_0, input_1):
primals_1 = self.q_linear.weight
primals_2 = self.q_linear.bias
primals_4 = self.k_linear.weight
primals_5 = self.k_linear.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
dingmyu/mmclassification
|
MHAttentionMap
| false
| 12,290
|
[
"Apache-2.0"
] | 0
|
c600b22907fb9423899f7c308c659168c2d01cd8
|
https://github.com/dingmyu/mmclassification/tree/c600b22907fb9423899f7c308c659168c2d01cd8
|
GNNExplainerProbe
|
import math
import torch
class AbstractTorchModule(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
def save(self, path):
None
torch.save(self.state_dict(), path)
def load(self, path):
None
self.load_state_dict(torch.load(path, map_location=self.device))
def set_device(self, device):
self.device = device
self
class GNNExplainerProbe(AbstractTorchModule):
def __init__(self, num_edges, num_layers, init_strategy='normal',
const_val=1.0):
super(GNNExplainerProbe, self).__init__()
mask = torch.empty((num_layers, num_edges))
if init_strategy == 'normal':
std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (2 *
math.sqrt(num_edges)))
with torch.no_grad():
mask.normal_(1.0, std)
elif init_strategy == 'const':
torch.nn.init.constant_(mask, const_val)
self.mask = torch.nn.Parameter(mask)
def forward(self):
s = torch.sigmoid(self.mask)
mask_ent = -s * torch.log(s) - (1 - s) * torch.log(1 - s)
penalty = mask_ent.mean() + 0.5 * s.sum()
return s, penalty
def get_inputs():
return []
def get_init_inputs():
return [[], {'num_edges': 4, 'num_layers': 1}]
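# --- Usage sketch (illustrative; num_edges=4 and num_layers=1 follow
# get_init_inputs() above). The probe owns the edge mask as a parameter, so in a
# real explanation loop `penalty` would be added to the task loss and
# backpropagated through the mask. ---
if __name__ == '__main__':
    probe = GNNExplainerProbe(num_edges=4, num_layers=1)
    gates, penalty = probe()   # gates: [1, 4] values in (0, 1); penalty: scalar
    penalty.backward()         # gradients flow into probe.mask
    print(gates.shape, probe.mask.grad.shape)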
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = -tmp1
tmp3 = tl_math.log(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp7 = tl_math.log(tmp6)
tmp8 = tmp6 * tmp7
tmp9 = tmp4 - tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 4.0
tmp17 = tmp12 / tmp16
tmp18 = 0.5
tmp19 = tmp15 * tmp18
tmp20 = tmp17 + tmp19
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp1, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
primals_1, = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0[grid(1)](
buf3, primals_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
return buf0, buf3, buf0
class AbstractTorchModule(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
def save(self, path):
None
torch.save(self.state_dict(), path)
def load(self, path):
None
self.load_state_dict(torch.load(path, map_location=self.device))
def set_device(self, device):
self.device = device
self
class GNNExplainerProbeNew(AbstractTorchModule):
def __init__(self, num_edges, num_layers, init_strategy='normal',
const_val=1.0):
super(GNNExplainerProbeNew, self).__init__()
mask = torch.empty((num_layers, num_edges))
if init_strategy == 'normal':
std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (2 *
math.sqrt(num_edges)))
with torch.no_grad():
mask.normal_(1.0, std)
elif init_strategy == 'const':
torch.nn.init.constant_(mask, const_val)
self.mask = torch.nn.Parameter(mask)
def forward(self):
primals_1 = self.mask
output = call([primals_1])
return output[0], output[1]
|
djz233/GraphMask
|
GNNExplainerProbe
| false
| 12,291
|
[
"MIT"
] | 0
|
4b699a1685f0d26973bb90cd75b09d74726cdc2f
|
https://github.com/djz233/GraphMask/tree/4b699a1685f0d26973bb90cd75b09d74726cdc2f
|
DenseGCNConv
|
import math
import torch
from torch.nn import Parameter
import torch.utils.data
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
class DenseGCNConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GCNConv`.
"""
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super(DenseGCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1 if not self.improved else 2
out = torch.matmul(x, self.weight)
deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5)
adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2)
out = torch.matmul(adj, out)
if self.bias is not None:
out = out + self.bias
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
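# --- Usage sketch (illustrative; in_channels=out_channels=4 follow
# get_init_inputs() above, but a plain [B, N, F] / [B, N, N] pair is used here
# instead of the benchmark's 4-D feature tensor -- an assumption for
# readability, not a requirement). ---
if __name__ == '__main__':
    conv = DenseGCNConv(in_channels=4, out_channels=4)
    x = torch.rand(4, 4, 4)                    # [B, N, F] node features
    adj = (torch.rand(4, 4, 4) > 0.5).float()  # [B, N, N] dense adjacency
    out = conv(x, adj)                         # [B, N, out_channels] = [4, 4, 4]
    print(out.shape)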
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = 1.0
tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + x4, xmask)
tmp13 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = -0.5
tmp10 = libdevice.pow(tmp8, tmp9)
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp19, tmp7)
tmp21 = libdevice.pow(tmp20, tmp9)
tmp22 = tmp12 * tmp21
tl.store(out_ptr0 + x4, tmp22, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del buf2
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
class DenseGCNConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GCNConv`.
"""
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super(DenseGCNConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
douglasrizzo/pytorch_geometric
|
DenseGCNConv
| false
| 12,292
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
LayerNormLSTMCell
|
import torch
import torch.distributed
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNormLSTMCell(nn.LSTMCell):
def __init__(self, input_size, hidden_size, bias=True):
super().__init__(input_size, hidden_size, bias)
self.ln_ih = nn.LayerNorm(4 * hidden_size)
self.ln_hh = nn.LayerNorm(4 * hidden_size)
self.ln_ho = nn.LayerNorm(hidden_size)
def forward(self, input, hidden=None):
if hidden is None:
hx = input.new_zeros(input.size(0), self.hidden_size,
requires_grad=False)
cx = input.new_zeros(input.size(0), self.hidden_size,
requires_grad=False)
else:
hx, cx = hidden
gates = self.ln_ih(F.linear(input, self.weight_ih, self.bias_ih)
) + self.ln_hh(F.linear(hx, self.weight_hh, self.bias_hh))
i, f, o = gates[:, :3 * self.hidden_size].sigmoid().chunk(3, 1)
g = gates[:, 3 * self.hidden_size:].tanh()
cy = f * cx + i * g
hy = o * self.ln_ho(cy).tanh()
return hy, cy
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
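# --- Usage sketch (illustrative; input_size=hidden_size=4 follow
# get_init_inputs() above; the batch size of 4 and the explicit reuse of the
# (hy, cy) state are assumptions for the example). ---
if __name__ == '__main__':
    cell = LayerNormLSTMCell(input_size=4, hidden_size=4)
    x = torch.rand(4, 4)        # [batch, input_size]
    hy, cy = cell(x)            # hidden state is zero-initialised when omitted
    hy, cy = cell(x, (hy, cy))  # feed the state back in for the next step
    print(hy.shape, cy.shape)   # both [4, 4]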
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_add_native_layer_norm_1(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0,
out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp42 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr4 + r1, None, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr5 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK])
tl.where(xmask, tmp23, 0)
tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp28 = tl.where(xmask, tmp26, 0)
tmp29 = tl.sum(tmp28, 1)[:, None]
tmp30 = tmp29 / tmp9
tmp31 = tmp23 - tmp30
tmp32 = tmp31 * tmp31
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.where(xmask, tmp33, 0)
tmp36 = tl.sum(tmp35, 1)[:, None]
tmp37 = tmp36 / tmp17
tmp38 = tmp37 + tmp19
tmp39 = libdevice.rsqrt(tmp38)
tmp40 = tmp0 - tmp10
tmp41 = tmp40 * tmp21
tmp43 = tmp41 * tmp42
tmp45 = tmp43 + tmp44
tmp46 = tmp22 - tmp30
tmp47 = tmp46 * tmp39
tmp49 = tmp47 * tmp48
tmp51 = tmp49 + tmp50
tmp52 = tmp45 + tmp51
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp39, xmask)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp52, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp30, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + x2, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_mul_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp1 = 0.0
tmp2 = tmp0 * tmp1
tmp5 = libdevice.tanh(tmp4)
tmp6 = tmp3 * tmp5
tmp7 = tmp2 + tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_4(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_mul_native_layer_norm_tanh_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = tmp0 * tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_7, buf0, reinterpret_tensor(primals_6,
(4, 16), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_6
del primals_7
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = reinterpret_tensor(buf3, (4, 1), (1, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf10 = reinterpret_tensor(buf8, (4, 1), (1, 1), 0)
del buf8
buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_per_fused_add_native_layer_norm_1[grid(4)](buf5, buf10, buf1,
buf6, primals_4, primals_5, primals_8, primals_9, buf2, buf7,
buf11, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_sigmoid_2[grid(48)](buf11, buf12, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_tanh_3[grid(16)](buf12, buf11, buf13, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf11
buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf15 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_4[grid(4)](buf13, buf14, buf15,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_native_layer_norm_tanh_5[grid(16)](buf12,
buf13, buf14, buf15, primals_10, primals_11, buf16, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf14
del buf15
return (buf16, buf13, primals_1, primals_4, primals_5, primals_8,
primals_9, primals_10, primals_11, buf0, buf1, buf2, buf5, buf6,
buf7, buf10, reinterpret_tensor(buf12, (4, 4), (12, 1), 0),
reinterpret_tensor(buf12, (4, 4), (12, 1), 8), buf13)
class LayerNormLSTMCellNew(nn.LSTMCell):
def __init__(self, input_size, hidden_size, bias=True):
super().__init__(input_size, hidden_size, bias)
self.ln_ih = nn.LayerNorm(4 * hidden_size)
self.ln_hh = nn.LayerNorm(4 * hidden_size)
self.ln_ho = nn.LayerNorm(hidden_size)
def forward(self, input_0):
primals_2 = self.weight_ih
primals_6 = self.weight_hh
primals_3 = self.bias_ih
primals_4 = self.bias_hh
primals_5 = self.ln_ih.weight
primals_7 = self.ln_ih.bias
primals_8 = self.ln_hh.weight
primals_9 = self.ln_hh.bias
primals_10 = self.ln_ho.weight
primals_11 = self.ln_ho.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
|
dimoteo333/TL-DR
|
LayerNormLSTMCell
| false
| 12,293
|
[
"Apache-2.0"
] | 0
|
b3bebc51e70a48294d7762fa73375cf1bf2ff068
|
https://github.com/dimoteo333/TL-DR/tree/b3bebc51e70a48294d7762fa73375cf1bf2ff068
|
Linear
|
import math
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
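# --- Usage sketch (illustrative; in_channels=out_channels=4 follow
# get_init_inputs() above, and the groups=2 instance is an added assumption to
# exercise the grouped-matmul branch as well as the default one). ---
if __name__ == '__main__':
    dense = Linear(in_channels=4, out_channels=4)              # groups=1 path
    grouped = Linear(in_channels=4, out_channels=4, groups=2)  # grouped path
    x = torch.rand(4, 4, 4, 4)
    print(dense(x).shape, grouped(x).shape)                    # both [4, 4, 4, 4]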
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import Tensor
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
class LinearNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(LinearNew, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
douglasrizzo/pytorch_geometric
|
Linear
| false
| 12,294
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
DenseSAGEConv
|
import math
import torch
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseSAGEConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.SAGEConv`.
"""
def __init__(self, in_channels, out_channels, normalize=False, bias=True):
super(DenseSAGEConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1
out = torch.matmul(adj, x)
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
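# --- Usage sketch (illustrative; in_channels=out_channels=4 follow
# get_init_inputs() above, while the batch size of 2, the boolean node mask and
# normalize=True are assumptions added to show the optional arguments). ---
if __name__ == '__main__':
    conv = DenseSAGEConv(in_channels=4, out_channels=4, normalize=True)
    x = torch.rand(2, 4, 4)                    # [B, N, F] node features
    adj = (torch.rand(2, 4, 4) > 0.5).float()  # [B, N, N] dense adjacency
    mask = torch.ones(2, 4, dtype=torch.bool)  # all nodes valid
    out = conv(x, adj, mask=mask)              # [2, 4, 4]
    print(out.shape)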
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = 1.0
tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clamp_div_sum_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 1.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp0 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf0, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
del primals_1
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clamp_div_sum_3[grid(256)](buf4, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del buf0
buf5 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
primals_3, out=buf5)
del primals_3
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return buf6, reinterpret_tensor(buf4, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseSAGEConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.SAGEConv`.
"""
def __init__(self, in_channels, out_channels, normalize=False, bias=True):
super(DenseSAGEConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
douglasrizzo/pytorch_geometric
|
DenseSAGEConv
| false
| 12,295
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
Gate
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Gate(nn.Module):
def __init__(self, args):
super(Gate, self).__init__()
self.d_model = args.d_model
self.weight_proj = nn.Linear(2 * self.d_model, 1)
self.tanh = nn.Tanh()
def forward(self, featureA, featureB):
feature = torch.cat([featureA, featureB], dim=-1)
att = self.tanh(self.weight_proj(feature))
gate_score = F.sigmoid(att)
gate_score = gate_score.repeat(1, 1, self.d_model)
return gate_score * featureA + (1 - gate_score) * featureB
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(d_model=4)}]
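# --- Usage sketch (illustrative; d_model=4 follows get_init_inputs() above, and
# types.SimpleNamespace stands in for the _mock_config benchmark helper so the
# snippet is self-contained). ---
if __name__ == '__main__':
    from types import SimpleNamespace
    gate = Gate(SimpleNamespace(d_model=4))
    a, b = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    fused = gate(a, b)  # element-wise convex combination of a and b, [4, 4, 4]
    print(fused.shape)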
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp1 = libdevice.tanh(tmp0)
tmp2 = tl.sigmoid(tmp1)
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp2
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 8), (8, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_repeat_rsub_sigmoid_tanh_1[grid(64)](buf2,
primals_1, primals_2, buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf3, primals_1, primals_2, reinterpret_tensor(buf0, (16, 8), (8,
1), 0), buf2
class GateNew(nn.Module):
def __init__(self, args):
super(GateNew, self).__init__()
self.d_model = args.d_model
self.weight_proj = nn.Linear(2 * self.d_model, 1)
self.tanh = nn.Tanh()
def forward(self, input_0, input_1):
primals_3 = self.weight_proj.weight
primals_4 = self.weight_proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
djz233/GraphMask
|
Gate
| false
| 12,296
|
[
"MIT"
] | 0
|
4b699a1685f0d26973bb90cd75b09d74726cdc2f
|
https://github.com/djz233/GraphMask/tree/4b699a1685f0d26973bb90cd75b09d74726cdc2f
|
TransformerDecoderLayer
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch._utils
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == 'relu':
return F.relu
if activation == 'gelu':
return F.gelu
if activation == 'glu':
return F.glu
raise RuntimeError(f'activation should be relu/gelu, not {activation}.')
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=None, nhead=1, dropout=0.1,
activation='relu'):
super().__init__()
if dim_feedforward is None:
dim_feedforward = d_model
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def forward(self, query, memory, position_embedding=None):
if position_embedding is not None:
query = torch.cat([query, position_embedding.flatten(2).permute
(1, 0, 2)], dim=0)
memory = torch.cat([memory, position_embedding.flatten(2).
permute(1, 0, 2)], dim=0)
tgt = self.multihead_attn(query=query, key=memory, value=memory)[0]
tgt = memory + self.dropout1(tgt)
tgt = self.norm1(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
return tgt
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
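# --- Usage sketch (illustrative; d_model=4 follows get_init_inputs() above. The
# [seq_len=4, batch=2, d_model=4] shapes are assumptions chosen to match
# nn.MultiheadAttention's default layout; query and memory must share the same
# sequence length because of the residual `memory + tgt`). ---
if __name__ == '__main__':
    layer = TransformerDecoderLayer(d_model=4, nhead=1).eval()
    query = torch.rand(4, 2, 4)
    memory = torch.rand(4, 2, 4)
    out = layer(query, memory)  # [4, 2, 4]
    print(out.shape)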
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn import functional as F
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (12, 4), (4, 1))
assert_size_stride(primals_4, (12,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 4),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_4, (4,), (1,), 8),
primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_3
buf3 = reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (1, 4, 4), (4, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf5
del buf5
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (1, 4, 4), (4, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf7, (4, 4), (4,
1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf8)
del primals_6
buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_3[grid(4)](primals_2, buf8,
buf9, buf10, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_2, buf8,
buf9, buf10, primals_7, primals_8, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1,
4), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_5[grid(16)](buf13, primals_10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_10
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_11, (4, 4), (1,
4), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_add_6[grid(16)](buf15, buf11, primals_12, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_12
buf16 = buf9
del buf9
buf17 = buf10
del buf10
triton_poi_fused_native_layer_norm_7[grid(4)](buf15, buf16, buf17,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_8[grid(16)](buf15, buf16, buf17,
primals_13, primals_14, buf18, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf16
del buf17
del primals_14
return (buf18, primals_2, primals_7, primals_13, primals_1, buf6,
reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf8, buf11, buf13,
buf15, primals_11, primals_9, primals_5, reinterpret_tensor(buf2, (
1, 4, 4), (4, 1, 4), 0), reinterpret_tensor(buf3, (1, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf1, (1, 4, 4), (4, 4, 1), 0))
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == 'relu':
return F.relu
if activation == 'gelu':
return F.gelu
if activation == 'glu':
return F.glu
    raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.')
class TransformerDecoderLayerNew(nn.Module):
def __init__(self, d_model, dim_feedforward=None, nhead=1, dropout=0.1,
activation='relu'):
super().__init__()
if dim_feedforward is None:
dim_feedforward = d_model
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
def forward(self, input_0, input_1):
primals_3 = self.multihead_attn.in_proj_weight
primals_4 = self.multihead_attn.in_proj_bias
primals_1 = self.multihead_attn.out_proj.weight
primals_6 = self.multihead_attn.out_proj.bias
primals_2 = self.linear1.weight
primals_7 = self.linear1.bias
primals_5 = self.linear2.weight
primals_8 = self.linear2.bias
primals_10 = self.norm1.weight
primals_12 = self.norm1.bias
primals_13 = self.norm2.weight
primals_14 = self.norm2.bias
primals_9 = input_0
primals_11 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
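# --- Illustrative smoke-test sketch (added for clarity; not part of the
# original entry; the _example_* helper name is ours). The fused forward above
# only accepts the two (4, 4) tensors checked by call() and launches the
# Triton kernels on the current CUDA stream, so a GPU is required.
def _example_decoder_layer_smoke_test():
    if not torch.cuda.is_available():
        return None
    layer = TransformerDecoderLayerNew(d_model=4).cuda()
    a = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, 4, device='cuda')
    out = layer(a, b)          # shape (4, 4), produced by the fused call()
    return out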
|
dingmyu/mmclassification
|
TransformerDecoderLayer
| false
| 12,297
|
[
"Apache-2.0"
] | 0
|
c600b22907fb9423899f7c308c659168c2d01cd8
|
https://github.com/dingmyu/mmclassification/tree/c600b22907fb9423899f7c308c659168c2d01cd8
|
Envelope
|
import torch
import torch.utils.data
class Envelope(torch.nn.Module):
def __init__(self, exponent):
super(Envelope, self).__init__()
self.p = exponent
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, x):
p, a, b, c = self.p, self.a, self.b, self.c
x_pow_p0 = x.pow(p)
x_pow_p1 = x_pow_p0 * x
return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'exponent': 4}]
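# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). For exponent p = 4 the
# coefficients are a = -(5 * 6) / 2 = -15, b = 4 * 6 = 24 and
# c = -(4 * 5) / 2 = -10, matching the constants hard-coded in the fused
# Triton kernel below, so forward(x) = 1/x - 15*x**4 + 24*x**5 - 10*x**6.
def _example_envelope_usage():
    env = Envelope(exponent=4)
    x = torch.rand(4, 4, 4, 4)
    expected = 1.0 / x - 15 * x ** 4 + 24 * x ** 5 - 10 * x ** 6
    assert torch.allclose(env(x), expected, atol=1e-6)
    return env(x)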
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp0
tmp6 = tmp5 * tmp5
tmp7 = -15.0
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp10 = tmp6 * tmp0
tmp11 = 24.0
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = -10.0
tmp15 = tmp10 * tmp14
tmp16 = tmp15 * tmp0
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class EnvelopeNew(torch.nn.Module):
def __init__(self, exponent):
super(EnvelopeNew, self).__init__()
self.p = exponent
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
douglasrizzo/pytorch_geometric
|
Envelope
| false
| 12,298
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
CategoricalSampler
|
import torch
import torch.nn as nn
class Sampler(nn.Module):
""" args; logits: (batch, n_nodes)
return; next_node: (batch, 1)
TopKSampler <=> greedy; sample one with biggest probability
CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability
"""
def __init__(self, n_samples=1, **kwargs):
super().__init__(**kwargs)
self.n_samples = n_samples
class CategoricalSampler(Sampler):
def forward(self, logits):
return torch.multinomial(logits.exp(), self.n_samples)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
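# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). Each row of `logits` is treated
# as unnormalised log-probabilities over n_nodes, and one node index is drawn
# per batch element; a greedy TopKSampler would take the argmax instead.
def _example_categorical_sampler_usage():
    sampler = CategoricalSampler(n_samples=1)
    logits = torch.rand(4, 4)      # (batch, n_nodes)
    next_node = sampler(logits)    # (batch, 1) int64 node indices
    assert next_node.shape == (4, 1)
    return next_node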
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf1 = torch.ops.aten.multinomial.default(buf0, 1)
del buf0
buf2 = buf1
del buf1
return buf2,
class Sampler(nn.Module):
""" args; logits: (batch, n_nodes)
return; next_node: (batch, 1)
TopKSampler <=> greedy; sample one with biggest probability
CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability
"""
def __init__(self, n_samples=1, **kwargs):
super().__init__(**kwargs)
self.n_samples = n_samples
class CategoricalSamplerNew(Sampler):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
daunfamily/VRP_MHA
|
CategoricalSampler
| false
| 12,299
|
[
"MIT"
] | 0
|
9c23d181d11dbbacac01299c6e8931b8e266b9b4
|
https://github.com/daunfamily/VRP_MHA/tree/9c23d181d11dbbacac01299c6e8931b8e266b9b4
|
Attention
|
import math
import torch
import torch.nn.functional as F
import torch.utils.data
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
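# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). restricted_softmax subtracts the
# clamped row maximum and adds exp(margin - max) to the denominator, so each
# row of attention weights sums to slightly less than 1.
def _example_attention_usage():
    attn = Attention(dropout=0)
    q = torch.rand(4, 4, 4, 4)
    k = torch.rand(4, 4, 4, 4)
    v = torch.rand(4, 4, 4, 4)
    out = attn(q, k, v)            # same shape as the value tensor
    assert out.shape == v.shape
    return out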
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1,
buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf4 = buf0
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class AttentionNew(torch.nn.Module):
def __init__(self, dropout=0):
super(AttentionNew, self).__init__()
self.dropout = dropout
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
douglasrizzo/pytorch_geometric
|
Attention
| false
| 12,300
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
Model
|
import torch
from torch import nn
class Model(nn.Module):
def __init__(self, input_size, dropout=0.5):
super(Model, self).__init__()
self.dropout = dropout
if self.dropout > 0:
self.dropout = nn.Dropout(dropout)
self.encode_w1 = nn.Linear(input_size, 64)
self.encode_w2 = nn.Linear(64, 32)
self.decode_w1 = nn.Linear(32, 64)
self.decode_w2 = nn.Linear(64, input_size)
def encoder(self, x):
x = self.encode_w1(x)
x = torch.relu(x)
x = self.encode_w2(x)
x = torch.relu(x)
if self.dropout:
x = self.dropout(x)
return x
def decoder(self, x):
x = self.decode_w1(x)
x = torch.relu(x)
x = self.decode_w2(x)
return x
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
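# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). The model is an
# input_size -> 64 -> 32 -> 64 -> input_size autoencoder applied to the last
# dimension, with dropout only between the encoder and the decoder.
def _example_autoencoder_usage():
    model = Model(input_size=4, dropout=0.5)
    model.eval()                   # disable dropout for a deterministic pass
    x = torch.rand(4, 4, 4, 4)
    recon = model(x)               # reconstruction with the same shape as x
    assert recon.shape == x.shape
    return recon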
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 64), (64, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32), (32, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf9, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3,
primals_5, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_6, (32, 64), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf5,
primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 64), (64,
1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9
class ModelNew(nn.Module):
def __init__(self, input_size, dropout=0.5):
super(ModelNew, self).__init__()
self.dropout = dropout
if self.dropout > 0:
self.dropout = nn.Dropout(dropout)
self.encode_w1 = nn.Linear(input_size, 64)
self.encode_w2 = nn.Linear(64, 32)
self.decode_w1 = nn.Linear(32, 64)
self.decode_w2 = nn.Linear(64, input_size)
def encoder(self, x):
x = self.encode_w1(x)
x = torch.relu(x)
x = self.encode_w2(x)
x = torch.relu(x)
if self.dropout:
x = self.dropout(x)
return x
def decoder(self, x):
x = self.decode_w1(x)
x = torch.relu(x)
x = self.decode_w2(x)
return x
def forward(self, input_0):
primals_1 = self.encode_w1.weight
primals_2 = self.encode_w1.bias
primals_4 = self.encode_w2.weight
primals_5 = self.encode_w2.bias
primals_6 = self.decode_w1.weight
primals_7 = self.decode_w1.bias
primals_8 = self.decode_w2.weight
primals_9 = self.decode_w2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
dohnlee/qufa2021
|
Model
| false
| 12,301
|
[
"MIT"
] | 0
|
5fb42caee09ec228358e49768e32c75e3c0094ce
|
https://github.com/dohnlee/qufa2021/tree/5fb42caee09ec228358e49768e32c75e3c0094ce
|
MaxPoolPad
|
import torch
import torch.nn as nn
import torch.nn.init
class MaxPoolPad(nn.Module):
def __init__(self):
super(MaxPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
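# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). The asymmetric (1, 0, 1, 0) zero
# pad, the stride-2 max pool, and the trailing [:, :, 1:, 1:] crop reduce a
# 4x4 spatial map to 2x2.
def _example_max_pool_pad_usage():
    pool = MaxPoolPad()
    x = torch.rand(4, 4, 4, 4)
    y = pool(x)
    assert y.shape == (4, 4, 2, 2)
    return y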
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = -2 + 2 * x1
tmp12 = tmp11 >= tmp1
tmp13 = -2 + 2 * x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, float('-inf'), tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2 * x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, float('-inf'), tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp29, tmp19)
tmp31 = 1 + 2 * x0
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, float('-inf'), tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = triton_helpers.maximum(tmp40, tmp30)
tmp42 = 2 * x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, float('-inf'), tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = triton_helpers.maximum(tmp51, tmp41)
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, float('-inf'), tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = triton_helpers.maximum(tmp58, tmp52)
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = triton_helpers.maximum(tmp65, tmp59)
tmp67 = 1 + 2 * x1
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, float('-inf'), tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = triton_helpers.maximum(tmp76, tmp66)
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, float('-inf'), tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = triton_helpers.maximum(tmp83, tmp77)
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, float('-inf'), tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = triton_helpers.maximum(tmp90, tmp84)
tl.store(out_ptr0 + x4, tmp91, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_max_pool2d_with_indices_0[grid(144)](
arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class MaxPoolPadNew(nn.Module):
def __init__(self):
super(MaxPoolPadNew, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dowhilefalse/DeOldify
|
MaxPoolPad
| false
| 12,302
|
[
"MIT"
] | 0
|
08f012cdbe36e3f8482460f57e1844b361a7fb16
|
https://github.com/dowhilefalse/DeOldify/tree/08f012cdbe36e3f8482460f57e1844b361a7fb16
|
DenseGraphConv
|
import math
import torch
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseGraphConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GraphConv`.
"""
def __init__(self, in_channels, out_channels, aggr='add', bias=True):
assert aggr in ['add', 'mean', 'max']
super(DenseGraphConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin.reset_parameters()
def forward(self, x, adj, mask=None):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
out = torch.matmul(adj, x)
out = torch.matmul(out, self.weight)
if self.aggr == 'mean':
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
elif self.aggr == 'max':
out = out.max(dim=-1)[0]
out = out + self.lin(x)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
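# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). With aggr='add' the layer
# computes adj @ x @ W plus a node-wise linear transform of x; the adjacency
# tensor is broadcast over the batch dimension.
def _example_dense_graph_conv_usage():
    conv = DenseGraphConv(in_channels=4, out_channels=4, aggr='add')
    x = torch.rand(4, 4, 4)                 # (batch, num_nodes, in_channels)
    adj = torch.eye(4).expand(4, 4, 4)      # identity adjacency per graph
    out = conv(x, adj)                      # (batch, num_nodes, out_channels)
    assert out.shape == (4, 4, 4)
    return out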
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseGraphConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GraphConv`.
"""
def __init__(self, in_channels, out_channels, aggr='add', bias=True):
assert aggr in ['add', 'mean', 'max']
super(DenseGraphConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin.reset_parameters()
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.lin.weight
primals_5 = self.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
douglasrizzo/pytorch_geometric
|
DenseGraphConv
| false
| 12,303
|
[
"MIT"
] | 0
|
effc617c6ad6daad506038bb79e4407082e74740
|
https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740
|
SelfAttentionUnit
|
import torch
from torch import nn
class SelfAttentionUnit(nn.Module):
def __init__(self, embed_dim, num_heads, max_len, dropout=0.8, bias=
False, skip_connection=True):
super(SelfAttentionUnit, self).__init__()
self.skip_connection = skip_connection
self.attn = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=
num_heads, dropout=dropout, bias=bias)
self.act = nn.ReLU()
self.ln = nn.LayerNorm([max_len, embed_dim])
def forward(self, x):
x = x.permute(1, 0, 2)
res, _ = self.attn(key=x, value=x, query=x)
res = self.act(res)
if self.skip_connection:
res = res + x
res = res.permute(1, 0, 2)
return self.ln(res)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4, 'max_len': 4}]
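# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). The unit takes
# (batch, max_len, embed_dim) input, permutes to the (seq, batch, embed)
# layout expected by nn.MultiheadAttention, applies self-attention + ReLU with
# a residual connection, and layer-normalises over the last two dimensions.
def _example_self_attention_unit_usage():
    unit = SelfAttentionUnit(embed_dim=4, num_heads=4, max_len=4)
    unit.eval()                    # attention dropout defaults to 0.8
    x = torch.rand(4, 4, 4)        # (batch, max_len, embed_dim)
    out = unit(x)
    assert out.shape == x.shape
    return out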
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (12 * (x0 // 4) + 48 * x1 + x0 % 4), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_per_fused_native_layer_norm_native_layer_norm_backward_6(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = rindex // 4
x0 = xindex
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 16 * x0), xmask, other=0.0)
tmp28 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + r3, None, eviction_policy='evict_last')
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tl.where(xmask, tmp5, 0)
tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp5 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = tmp4 - tmp14
tmp22 = 16.0
tmp23 = tmp20 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp21 * tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tmp32 = 0.0625
tmp33 = tmp26 * tmp32
tl.store(out_ptr2 + (r3 + 16 * x0), tmp27, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp33, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32)
triton_poi_fused_mul_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(192)](buf1, buf3, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0,
16), 64), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf3, (16, 4, 1), (1,
16, 0), 128), out=buf7)
buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf9)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_per_fused_native_layer_norm_native_layer_norm_backward_6[grid(4)
](buf9, primals_1, primals_4, primals_5, buf13, buf14, buf15, 4,
16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
del primals_5
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf9, buf16,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf9
return buf14, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), buf13, buf15, buf16, primals_3, reinterpret_tensor(buf3, (16, 1,
4), (1, 1, 16), 128), reinterpret_tensor(buf2, (16, 1, 4), (1, 1,
16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 64)
class SelfAttentionUnitNew(nn.Module):
def __init__(self, embed_dim, num_heads, max_len, dropout=0.8, bias=
False, skip_connection=True):
super(SelfAttentionUnitNew, self).__init__()
self.skip_connection = skip_connection
self.attn = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=
num_heads, dropout=dropout, bias=bias)
self.act = nn.ReLU()
self.ln = nn.LayerNorm([max_len, embed_dim])
def forward(self, input_0):
primals_2 = self.attn.in_proj_weight
primals_3 = self.attn.out_proj.weight
primals_4 = self.ln.weight
primals_5 = self.ln.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
dohnlee/qufa2021
|
SelfAttentionUnit
| false
| 12,304
|
[
"MIT"
] | 0
|
5fb42caee09ec228358e49768e32c75e3c0094ce
|
https://github.com/dohnlee/qufa2021/tree/5fb42caee09ec228358e49768e32c75e3c0094ce
|
AvgPoolPad
|
import torch
import torch.nn as nn
import torch.nn.init
class AvgPoolPad(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
count_include_pad=False)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
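# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). A stride-2 average pool behind
# an asymmetric zero pad, followed by a [:, :, 1:, 1:] crop;
# count_include_pad=False excludes the pooling layer's own implicit padding
# from the divisor, and a 4x4 spatial map again becomes 2x2.
def _example_avg_pool_pad_usage():
    pool = AvgPoolPad()
    x = torch.rand(4, 4, 4, 4)
    y = pool(x)
    assert y.shape == (4, 4, 2, 2)
    return y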
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 5, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = -2 + 2 * x1
tmp12 = tmp11 >= tmp1
tmp13 = -2 + 2 * x0
tmp14 = tmp13 >= tmp1
tmp15 = tmp12 & tmp14
tmp16 = tmp15 & tmp10
tmp17 = tl.load(in_ptr0 + (-10 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp10, tmp17, tmp18)
tmp20 = 2 * x0
tmp21 = tmp20 >= tmp1
tmp22 = tmp20 < tmp3
tmp23 = tmp21 & tmp22
tmp24 = tmp5 & tmp23
tmp25 = tmp12 & tmp7
tmp26 = tmp25 & tmp24
tmp27 = tl.load(in_ptr0 + (-9 + 2 * x0 + 8 * x1 + 16 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp24, tmp27, tmp28)
tmp30 = tmp29 + tmp19
tmp31 = 1 + 2 * x0
tmp32 = tmp31 >= tmp1
tmp33 = tmp31 < tmp3
tmp34 = tmp32 & tmp33
tmp35 = tmp5 & tmp34
tmp36 = tmp12 & tmp21
tmp37 = tmp36 & tmp35
tmp38 = tl.load(in_ptr0 + (-8 + 2 * x0 + 8 * x1 + 16 * x2), tmp37 &
xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp35, tmp38, tmp39)
tmp41 = tmp40 + tmp30
tmp42 = 2 * x1
tmp43 = tmp42 >= tmp1
tmp44 = tmp42 < tmp3
tmp45 = tmp43 & tmp44
tmp46 = tmp45 & tmp9
tmp47 = tmp2 & tmp14
tmp48 = tmp47 & tmp46
tmp49 = tl.load(in_ptr0 + (-6 + 2 * x0 + 8 * x1 + 16 * x2), tmp48 &
xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype)
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tmp51 + tmp41
tmp53 = tmp45 & tmp23
tmp54 = tmp2 & tmp7
tmp55 = tmp54 & tmp53
tmp56 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp55 &
xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
tmp58 = tl.where(tmp53, tmp56, tmp57)
tmp59 = tmp58 + tmp52
tmp60 = tmp45 & tmp34
tmp61 = tmp2 & tmp21
tmp62 = tmp61 & tmp60
tmp63 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp62 &
xmask, eviction_policy='evict_last', other=0.0)
tmp64 = tl.full(tmp63.shape, 0.0, tmp63.dtype)
tmp65 = tl.where(tmp60, tmp63, tmp64)
tmp66 = tmp65 + tmp59
tmp67 = 1 + 2 * x1
tmp68 = tmp67 >= tmp1
tmp69 = tmp67 < tmp3
tmp70 = tmp68 & tmp69
tmp71 = tmp70 & tmp9
tmp72 = tmp43 & tmp14
tmp73 = tmp72 & tmp71
tmp74 = tl.load(in_ptr0 + (-2 + 2 * x0 + 8 * x1 + 16 * x2), tmp73 &
xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp71, tmp74, tmp75)
tmp77 = tmp76 + tmp66
tmp78 = tmp70 & tmp23
tmp79 = tmp43 & tmp7
tmp80 = tmp79 & tmp78
tmp81 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp80 &
xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tmp83 + tmp77
tmp85 = tmp70 & tmp34
tmp86 = tmp43 & tmp21
tmp87 = tmp86 & tmp85
tmp88 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp87 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tl.full(tmp88.shape, 0.0, tmp88.dtype)
tmp90 = tl.where(tmp85, tmp88, tmp89)
tmp91 = tmp90 + tmp84
tmp92 = (0 * (0 >= -1 + 2 * x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (
0 * (0 >= -1 + 2 * x1) + (-1 + 2 * x1) * (-1 + 2 * x1 > 0)) + (5 *
(5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 * x0 < 5)) * (5 * (5 <= 2 +
2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 *
x0) + (-1 + 2 * x0) * (-1 + 2 * x0 > 0)) * (5 * (5 <= 2 + 2 * x1) +
(2 + 2 * x1) * (2 + 2 * x1 < 5)) + -1 * (0 * (0 >= -1 + 2 * x1) + (
-1 + 2 * x1) * (-1 + 2 * x1 > 0)) * (5 * (5 <= 2 + 2 * x0) + (2 + 2 *
x0) * (2 + 2 * x0 < 5))
tmp93 = tmp91 / tmp92
tl.store(out_ptr0 + x4, tmp93, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_constant_pad_nd_0[grid(144)](arg0_1,
buf0, 144, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4, 2, 2), (36, 9, 3, 1), 4),
class AvgPoolPadNew(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPadNew, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding,
count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dowhilefalse/DeOldify
|
AvgPoolPad
| false
| 12,305
|
[
"MIT"
] | 0
|
08f012cdbe36e3f8482460f57e1844b361a7fb16
|
https://github.com/dowhilefalse/DeOldify/tree/08f012cdbe36e3f8482460f57e1844b361a7fb16
|
MultiHead
|
import math
import torch
from torch import Tensor
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
class MultiHead(Attention):
def __init__(self, in_channels, out_channels, heads=1, groups=1,
dropout=0, bias=True):
super(MultiHead, self).__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1) == value.size(-1)
assert key.size(-2) == value.size(-2)
query = self.lin_q(query)
key = self.lin_k(key)
value = self.lin_v(value)
size = list(query.size())[:-2]
out_channels_per_head = self.out_channels // self.heads
query_size = size + [query.size(-2), self.heads, out_channels_per_head]
query = query.view(*query_size).transpose(-2, -3)
key_size = size + [key.size(-2), self.heads, out_channels_per_head]
key = key.view(*key_size).transpose(-2, -3)
value_size = size + [value.size(-2), self.heads, out_channels_per_head]
value = value.view(*value_size).transpose(-2, -3)
out = super(MultiHead, self).forward(query, key, value)
out = out.transpose(-3, -2).contiguous()
out = out.view(*(size + [query.size(-2), self.out_channels]))
return out
def __repr__(self):
return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.heads, self.groups, self.dropout, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
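# --- Illustrative usage sketch (added for clarity; not part of the original
# entry; the _example_* helper name is ours). MultiHead projects query/key/
# value with (optionally grouped) linear layers, splits the channel dimension
# into `heads` chunks, runs the restricted-softmax attention per head, and
# concatenates the heads back into out_channels.
def _example_multi_head_usage():
    mha = MultiHead(in_channels=4, out_channels=4, heads=1, groups=1, dropout=0)
    q = torch.rand(4, 4, 4, 4)
    k = torch.rand(4, 4, 4, 4)
    v = torch.rand(4, 4, 4, 4)
    out = mha(q, k, v)             # (..., query_len, out_channels)
    assert out.shape == (4, 4, 4, 4)
    return out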
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import Tensor
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * ((x1 + 4 * (x2 %
4)) // 16)), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
tl.store(out_ptr1 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2)
del primals_8
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_0[grid(256)](buf4, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf3, buf5, buf15, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0)
del buf3
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf4, buf6, buf16, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf5, buf6, out=buf7)
buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0
)
del buf6
triton_poi_fused_clamp_div_exp_max_sub_2[grid(256)](buf7, buf8, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64),
torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3[grid(64)](buf8,
buf7, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0
)
del buf8
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4[grid(256)](buf10,
buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf9
buf11 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_0[grid(256)](buf11, primals_9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf12 = buf5
del buf5
buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf11, buf12, buf14,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), buf12, out=buf13)
del buf12
return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3,
(4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
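# Added commentary (not part of the generated source): a rough reading of
# call() above.  buf0/buf1/buf2 are the query/key/value projections done with
# extern mm kernels; triton_poi_fused_add_0 folds in the three biases;
# triton_poi_fused_bmm_transpose_1 lays the tensors out for bmm (and keeps
# transposed copies for the backward pass); buf7 = bmm(q, k^T) holds the raw
# scores, and kernels 2-4 apply the restricted softmax (scale by 1/sqrt(4) =
# 0.5, subtract the clamped max, exponentiate, add exp(-max) for the margin
# slot, and normalise); the final bmm with the biased value tensor produces
# the attention output, reshaped back to (4, 4, 4, 4).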
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
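# Added commentary (not part of the generated source): restricted_softmax is
# an ordinary softmax over the scores plus one implicit logit fixed at
# `margin` (0 by default).  Because exp(margin - src_max) is added to the
# denominator, the returned weights can sum to less than 1, letting the
# attention assign probability mass to "nothing" when all scores are small.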
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
class MultiHeadNew(Attention):
def __init__(self, in_channels, out_channels, heads=1, groups=1,
dropout=0, bias=True):
super(MultiHeadNew, self).__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def __repr__(self):
return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.heads, self.groups, self.dropout, self.bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.lin_q.weight
primals_5 = self.lin_q.bias
primals_6 = self.lin_k.weight
primals_7 = self.lin_k.bias
primals_8 = self.lin_v.weight
primals_9 = self.lin_v.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
douglasrizzo/pytorch_geometric | MultiHead | false | 12,306 | ["MIT"] | 0 | effc617c6ad6daad506038bb79e4407082e74740 | https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740 |
ResidualLayer
|
import math
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def swish(x):
return x * x.sigmoid()
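# Added commentary: swish(x) = x * sigmoid(x), i.e. the SiLU activation.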
def glorot_orthogonal(tensor, scale):
if tensor is not None:
torch.nn.init.orthogonal_(tensor.data)
scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var()
tensor.data *= scale.sqrt()
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class ResidualLayer(torch.nn.Module):
def __init__(self, hidden_channels, act=swish):
super(ResidualLayer, self).__init__()
self.act = act
self.lin1 = Linear(hidden_channels, hidden_channels)
self.lin2 = Linear(hidden_channels, hidden_channels)
self.reset_parameters()
def reset_parameters(self):
glorot_orthogonal(self.lin1.weight, scale=2.0)
self.lin1.bias.data.fill_(0)
glorot_orthogonal(self.lin2.weight, scale=2.0)
self.lin2.bias.data.fill_(0)
def forward(self, x):
return x + self.act(self.lin2(self.act(self.lin1(x))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_channels': 4}]
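if __name__ == '__main__':
    # Hedged usage sketch (added commentary, not part of the upstream module):
    # applies the residual block to the random tensor from get_inputs() above.
    layer = ResidualLayer(hidden_channels=4)
    x, = get_inputs()
    y = layer(x)
    print(y.shape)  # expected to be torch.Size([4, 4, 4, 4])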
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_3, buf1,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_2, buf2,
primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1,
(4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
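# Added commentary (not part of the generated source): call() above computes
# lin1 as an extern mm (buf0), triton_poi_fused_mul_sigmoid_0 adds the bias
# and applies swish, a second mm gives lin2 (buf2), and
# triton_poi_fused_add_mul_sigmoid_1 adds the bias, applies swish again and
# adds the residual input, matching x + act(lin2(act(lin1(x)))).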
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def swish(x):
return x * x.sigmoid()
def glorot_orthogonal(tensor, scale):
if tensor is not None:
torch.nn.init.orthogonal_(tensor.data)
scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var()
tensor.data *= scale.sqrt()
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class ResidualLayerNew(torch.nn.Module):
def __init__(self, hidden_channels, act=swish):
super(ResidualLayerNew, self).__init__()
self.act = act
self.lin1 = Linear(hidden_channels, hidden_channels)
self.lin2 = Linear(hidden_channels, hidden_channels)
self.reset_parameters()
def reset_parameters(self):
glorot_orthogonal(self.lin1.weight, scale=2.0)
self.lin1.bias.data.fill_(0)
glorot_orthogonal(self.lin2.weight, scale=2.0)
self.lin2.bias.data.fill_(0)
def forward(self, input_0):
primals_1 = self.lin1.weight
primals_3 = self.lin1.bias
primals_4 = self.lin2.weight
primals_5 = self.lin2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
douglasrizzo/pytorch_geometric | ResidualLayer | false | 12,307 | ["MIT"] | 0 | effc617c6ad6daad506038bb79e4407082e74740 | https://github.com/douglasrizzo/pytorch_geometric/tree/effc617c6ad6daad506038bb79e4407082e74740 |
BPRLoss
|
import torch
import torch.nn as nn
class BPRLoss(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
- gamma(float): Small value to avoid division by zero
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLoss, self).__init__()
self.gamma = gamma
def forward(self, pos_score, neg_score):
loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)
).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = 1e-10
tmp5 = tmp3 + tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = -tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_neg_sigmoid_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
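# Added commentary (not part of the generated source): the single persistent
# reduction kernel above fuses the whole loss, i.e.
# -mean(log(sigmoid(pos - neg) + 1e-10)) over all 256 elements.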
class BPRLossNew(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
- gamma(float): Small value to avoid division by zero
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLossNew, self).__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
dreaming-qin/RecBole | BPRLoss | false | 12,308 | ["MIT"] | 0 | d6de39521484ded60c387ca604abaf86310acdbe | https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe |
ResNetV2
|
import torch
import torch.nn as nn
from collections import OrderedDict
from os.path import join as pjoin  # used by PreActBottleneck.load_from below
import torch.nn.functional as F
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
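# Added commentary (not part of the upstream source): TF/JAX conv kernels are
# stored as (H, W, I, O); transpose([3, 2, 0, 1]) reorders them to the
# (O, I, H, W) layout expected by nn.Conv2d.weight.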
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width,
kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True
)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width *
4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width *
4, cout=width * 4, cmid=width)) for i in range(2, block_units[0
] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
cout=width * 8, cmid=width * 2)) for i in range(2, block_units[
1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
cout=width * 16, cmid=width * 4)) for i in range(2, block_units
[2] + 1)])))]))
def forward(self, x):
x = self.root(x)
x = self.body(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
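if __name__ == '__main__':
    # Hedged usage sketch (added commentary, not part of the upstream module):
    # block_units=[1, 1, 1] and width_factor=1 are deliberately smaller than
    # get_init_inputs() so the CPU forward pass stays cheap.
    backbone = ResNetV2(block_units=[1, 1, 1], width_factor=1)
    x = torch.rand(2, 3, 64, 64)
    feats = backbone(x)
    print(feats.shape)  # expected to be torch.Size([2, 1024, 4, 4])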
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 15
x2 = xindex // 3840 % 15
x3 = xindex // 57600
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 225
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1024
x1 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 225, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 225.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x3, tmp21, None)
tl.store(out_ptr0 + x3, tmp10, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 1800.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 256
x2 = xindex // 57600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1800.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2304.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 7200.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 225.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 7200.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 7200.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 2048
x1 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 64.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 3600.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 115200
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 3600.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4608.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
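# GroupNorm + ReLU using the statistics produced above (variance divisor 1024).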
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
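# Weight standardization for 1x1 conv filters (512 elements each) in a single persistent block:
# the std and the normalized weights are produced together.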
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)
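# GroupNorm statistics (mean, M2, rstd) reduced over 4096 elements per (batch, group) for the
# 2048-channel feature map.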
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
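# Fused bottleneck join: both incoming branches are normalized and affine-transformed,
# summed, then passed through ReLU in one elementwise kernel.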
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 4096.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
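# Weight standardization for 1x1 filters with 2048 inputs each: a reduction computes mean/std,
# then a second pass writes the standardized weights.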
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
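# Pre-activation residual output: normalize the branch, apply gamma/beta, add the identity
# tensor, then ReLU.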
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 4096.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
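# Normalization statistics per (batch, channel): mean, M2, and rstd over the 16 spatial
# positions, computed with a small persistent reduction.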
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
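# GroupNorm + ReLU for the 1024-channel activations (variance divisor 2048).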
@triton.jit
def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 9216.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask)
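# Per-(batch, group) statistics over 512 elements (32 channels x 16 spatial) in a single pass.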
@triton.jit
def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
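# Bottleneck join for the 4096-channel stage: normalize both branches, apply their affine
# parameters, add, then ReLU.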
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 2048.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)
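# Residual add + GroupNorm + ReLU producing the next 4096-channel activation.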
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
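# Same residual add + GroupNorm + ReLU as above, but the result is written back in a re-tiled
# memory layout.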
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 16
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask)
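# ReLU backward mask: records which activations were <= 0 for the threshold_backward pass.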
@triton.jit
def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask)
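# Inductor-generated entry point: standardizes the convolution weights, runs the convolutions
# through extern_kernels, and applies the fused GroupNorm/ReLU kernels defined above.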
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (1024,), (1,))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024,), (1,))
assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024,), (1,))
assert_size_stride(primals_25, (1024,), (1,))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256,), (1,))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_33, (1024,), (1,))
assert_size_stride(primals_34, (1024,), (1,))
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_36, (256,), (1,))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256,), (1,))
assert_size_stride(primals_40, (256,), (1,))
assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (1024,), (1,))
assert_size_stride(primals_43, (1024,), (1,))
assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_45, (2048,), (1,))
assert_size_stride(primals_46, (2048,), (1,))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (512,), (1,))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512,), (1,))
assert_size_stride(primals_52, (512,), (1,))
assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_54, (2048,), (1,))
assert_size_stride(primals_55, (2048,), (1,))
assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_57, (512,), (1,))
assert_size_stride(primals_58, (512,), (1,))
assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_60, (512,), (1,))
assert_size_stride(primals_61, (512,), (1,))
assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_63, (2048,), (1,))
assert_size_stride(primals_64, (2048,), (1,))
assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_66, (512,), (1,))
assert_size_stride(primals_67, (512,), (1,))
assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_69, (512,), (1,))
assert_size_stride(primals_70, (512,), (1,))
assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_72, (2048,), (1,))
assert_size_stride(primals_73, (2048,), (1,))
assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_75, (512,), (1,))
assert_size_stride(primals_76, (512,), (1,))
assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_78, (512,), (1,))
assert_size_stride(primals_79, (512,), (1,))
assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_81, (2048,), (1,))
assert_size_stride(primals_82, (2048,), (1,))
assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_84, (4096,), (1,))
assert_size_stride(primals_85, (4096,), (1,))
assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_87, (1024,), (1,))
assert_size_stride(primals_88, (1024,), (1,))
assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024,), (1,))
assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_93, (4096,), (1,))
assert_size_stride(primals_94, (4096,), (1,))
assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_96, (1024,), (1,))
assert_size_stride(primals_97, (1024,), (1,))
assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_102, (4096,), (1,))
assert_size_stride(primals_103, (4096,), (1,))
assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_105, (1024,), (1,))
assert_size_stride(primals_106, (1024,), (1,))
assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_108, (1024,), (1,))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_111, (4096,), (1,))
assert_size_stride(primals_112, (4096,), (1,))
assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_114, (1024,), (1,))
assert_size_stride(primals_115, (1024,), (1,))
assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_117, (1024,), (1,))
assert_size_stride(primals_118, (1024,), (1,))
assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_120, (4096,), (1,))
assert_size_stride(primals_121, (4096,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
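        # Copy the convolution weights and the input into the strided (channels-last-style)
        # layouts expected by the fused kernels below.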
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_11
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_59
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_68
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_77
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_89
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_98, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_98
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_107
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_116
buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf15
buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0,
buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21,
buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19,
buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_4
buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24,
buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf28
buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30,
primals_5, buf31, 1024, 256, num_warps=2, num_stages=1)
buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33,
buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1)
buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf38
buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40,
primals_8, buf41, 256, 256, num_warps=2, num_stages=1)
buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf43 = buf21
del buf21
buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43,
buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42,
buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_10
buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51,
buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf54 = buf44
del buf44
buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54,
buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53,
buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_13
buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf60
buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62,
primals_14, buf63, 1024, 256, num_warps=2, num_stages=1)
buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf65 = buf55
del buf55
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65,
buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
buf70 = buf69
del buf69
triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70,
buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66,
primals_15, primals_16, 921600, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_16
del primals_7
buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf72
buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74,
primals_17, buf75, 256, 1024, num_warps=8, num_stages=1)
buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf77 = buf66
del buf66
buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77,
buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76,
buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_19
buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf83
buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85,
buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf88 = buf78
del buf78
buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88,
buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87,
buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_22
buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf94
buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96,
primals_23, buf97, 1024, 256, num_warps=2, num_stages=1)
buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf99 = buf89
del buf89
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99,
buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70,
buf98, buf99, buf100, primals_24, primals_25, buf103, 921600,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_25
buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf105
buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107,
primals_26, buf108, 256, 1024, num_warps=8, num_stages=1)
buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf110 = buf100
del buf100
buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110,
buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109,
buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_28
buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf116
buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118,
buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf121 = buf111
del buf111
buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121,
buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120,
buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_31
buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf127
buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129,
primals_32, buf130, 1024, 256, num_warps=2, num_stages=1)
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf132 = buf122
del buf122
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132,
buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103,
buf131, buf132, buf133, primals_33, primals_34, buf136, 921600,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_34
buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf138
buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140,
primals_35, buf141, 256, 1024, num_warps=8, num_stages=1)
buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf143 = buf133
del buf133
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143,
buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142,
buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_37
buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf149
buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151,
buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf154 = buf144
del buf144
buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154,
buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153,
buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_40
buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf160
buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162,
primals_41, buf163, 1024, 256, num_warps=2, num_stages=1)
buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf165 = buf155
del buf155
buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165,
buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136,
buf164, buf165, buf166, primals_42, primals_43, buf169, 921600,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf171
buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173,
primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1)
buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176,
buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf181
buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183,
primals_47, buf184, 512, 1024, num_warps=8, num_stages=1)
buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512))
buf186 = buf166
del buf166
buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186,
buf187, buf189, 128, 3600, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185,
buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_49
buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf192
buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194,
buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf197 = buf187
del buf187
buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197,
buf198, buf200, 128, 1024, num_warps=8, num_stages=1)
buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196,
buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_52
buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf203
buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205,
primals_53, buf206, 2048, 512, num_warps=4, num_stages=1)
buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf208 = buf198
del buf198
buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208,
buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
buf213 = buf212
del buf212
triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213,
buf175, buf176, buf177, primals_45, primals_46, buf207, buf208,
buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf177
del primals_46
del primals_55
buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf215
buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217,
primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf220 = buf209
del buf209
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220,
buf221, buf223, 128, 1024, num_warps=8, num_stages=1)
buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219,
buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_58
buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf226
buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228,
buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf231 = buf221
del buf221
buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231,
buf232, buf234, 128, 1024, num_warps=8, num_stages=1)
buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230,
buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_61
buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf237
buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239,
primals_62, buf240, 2048, 512, num_warps=4, num_stages=1)
buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf242 = buf232
del buf232
buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242,
buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213,
buf241, buf242, buf243, primals_63, primals_64, buf246, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_64
buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf248
buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250,
primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf253 = buf243
del buf243
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253,
buf254, buf256, 128, 1024, num_warps=8, num_stages=1)
buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252,
buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_67
buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf259
buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261,
buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf264 = buf254
del buf254
buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264,
buf265, buf267, 128, 1024, num_warps=8, num_stages=1)
buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263,
buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_70
buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf270
buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272,
primals_71, buf273, 2048, 512, num_warps=4, num_stages=1)
buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf275 = buf265
del buf265
buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275,
buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246,
buf274, buf275, buf276, primals_72, primals_73, buf279, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_73
buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf281
buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283,
primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf286 = buf276
del buf276
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286,
buf287, buf289, 128, 1024, num_warps=8, num_stages=1)
buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285,
buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_76
buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf292
buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294,
buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf297 = buf287
del buf287
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297,
buf298, buf300, 128, 1024, num_warps=8, num_stages=1)
buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296,
buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_79
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf303
buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305,
primals_80, buf306, 2048, 512, num_warps=4, num_stages=1)
buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf308 = buf298
del buf298
buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308,
buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279,
buf307, buf308, buf309, primals_81, primals_82, buf312, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_82
buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096,
4096), 0)
del buf34
buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf314
buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316,
primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319,
buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1)
buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf324
buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326,
primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf329 = buf309
del buf309
buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329,
buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328,
buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_88
buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf335
buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337,
buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf340 = buf330
del buf330
buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340,
buf341, buf343, 128, 512, num_warps=4, num_stages=1)
buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339,
buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_91
buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf346
buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348,
primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1)
buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf351 = buf341
del buf341
buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351,
buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
buf356 = buf355
del buf355
triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356,
buf318, buf319, buf320, primals_84, primals_85, buf350, buf351,
buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del buf320
del primals_85
del primals_94
buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf358
buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360,
primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf363 = buf352
del buf352
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363,
buf364, buf366, 128, 512, num_warps=4, num_stages=1)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362,
buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_97
buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf369
buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371,
buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf374 = buf364
del buf364
buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374,
buf375, buf377, 128, 512, num_warps=4, num_stages=1)
buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373,
buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_100
buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf380
buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382,
primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1)
buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf385 = buf375
del buf375
buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385,
buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356,
buf384, buf385, buf386, primals_102, primals_103, buf389,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_103
buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf391
buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393,
primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf396 = buf386
del buf386
buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396,
buf397, buf399, 128, 512, num_warps=4, num_stages=1)
buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395,
buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_106
buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf402
buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404,
buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf407 = buf397
del buf397
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407,
buf408, buf410, 128, 512, num_warps=4, num_stages=1)
buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406,
buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_109
buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf413
buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415,
primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1)
buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf418 = buf408
del buf408
buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418,
buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389,
buf417, buf418, buf419, primals_111, primals_112, buf422,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_112
buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf424
buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426,
primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf429 = buf419
del buf419
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429,
buf430, buf432, 128, 512, num_warps=4, num_stages=1)
buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428,
buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_115
buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf435
buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437,
buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf440 = buf430
del buf430
buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440,
buf441, buf443, 128, 512, num_warps=4, num_stages=1)
buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439,
buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_118
buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf446
buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448,
primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1)
buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf451 = buf441
del buf441
buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451,
buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1),
torch.float32)
triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422,
buf450, buf451, buf452, primals_120, primals_121, buf455, 64,
4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del buf452
del primals_121
buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.bool)
triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455,
buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8,
primals_9, buf2, primals_12, primals_14, primals_15, primals_17,
primals_18, buf3, primals_21, primals_23, primals_24, primals_26,
primals_27, buf4, primals_30, primals_32, primals_33, primals_35,
primals_36, buf5, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, buf6, primals_51, primals_53,
primals_54, primals_56, primals_57, buf7, primals_60, primals_62,
primals_63, primals_65, primals_66, buf8, primals_69, primals_71,
primals_72, primals_74, primals_75, buf9, primals_78, primals_80,
primals_81, primals_83, primals_84, primals_86, primals_87, buf10,
primals_90, primals_92, primals_93, primals_95, primals_96, buf11,
primals_99, primals_101, primals_102, primals_104, primals_105,
buf12, primals_108, primals_110, primals_111, primals_113,
primals_114, buf13, primals_117, primals_119, primals_120, buf17,
buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0),
reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26,
buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1),
0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40,
buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0),
reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52,
buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0),
reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63,
buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0),
reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75,
buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0),
reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86,
buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0),
reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97,
buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0),
reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107,
buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0),
reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118,
buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0),
reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129,
buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0),
reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140,
buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0),
reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151,
buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0),
reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162,
buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0),
reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173,
buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0),
reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184,
buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0),
reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194,
buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0),
reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205,
buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0),
reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217,
buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0),
reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228,
buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0),
reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239,
buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0),
reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250,
buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0),
reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261,
buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0),
reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272,
buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0),
reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283,
buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0),
reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294,
buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0),
reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305,
buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0),
reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316,
buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0),
reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327,
buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0),
reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337,
buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0),
reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348,
buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0),
reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360,
buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0),
reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371,
buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0),
reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382,
buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0),
reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393,
buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0),
reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404,
buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0),
reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415,
buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0),
reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426,
buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0),
reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437,
buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0),
reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448,
buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0),
reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456)
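# Note (added annotation, not emitted by the compiler): buf455 above is the
# final block3 activation that ResNetV2New.forward returns as output[0]; the
# remaining entries of the tuple appear to be the weights, the standardized
# convolution filters, group-norm statistics and boolean masks that Inductor
# saves for the backward pass.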
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
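# Illustrative sanity check (added sketch, not part of the original model code):
# StdConv2d standardizes each output filter to zero mean and unit variance
# before convolving, which the fused add/div/sqrt/sub/var_mean Triton kernels
# above appear to reproduce directly on the convolution weights.
def _check_weight_standardization():
    conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    w = conv.weight
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    w_std = (w - m) / torch.sqrt(v + 1e-05)
    # Each of the 8 output filters now has (approximately) zero mean.
    assert torch.allclose(w_std.mean(dim=[1, 2, 3]), torch.zeros(8), atol=1e-5)
    # The standardized weight is what actually convolves the input.
    return conv(torch.rand(1, 3, 16, 16)).shape  # torch.Size([1, 8, 16, 16])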
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
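# Hypothetical usage sketch (added example; the sizes are assumptions, not
# taken from the dataset entry): a downsampling unit like the block2/block3
# "unit1" instances, where stride=2 and cin != cout trigger the projection
# shortcut with its own GroupNorm.
def _bottleneck_example():
    block = PreActBottleneck(cin=256, cout=512, cmid=128, stride=2)
    x = torch.rand(2, 256, 32, 32)
    y = block(x)
    return y.shape  # torch.Size([2, 512, 16, 16])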
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width,
kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True
)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width *
4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width *
4, cout=width * 4, cmid=width)) for i in range(2, block_units[0
] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
cout=width * 8, cmid=width * 2)) for i in range(2, block_units[
1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
cout=width * 16, cmid=width * 4)) for i in range(2, block_units
[2] + 1)])))]))
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.root.gn.weight
primals_4 = self.root.gn.bias
primals_9 = self.body.block1.unit1.gn1.weight
primals_10 = self.body.block1.unit1.gn1.bias
primals_8 = self.body.block1.unit1.conv1.weight
primals_12 = self.body.block1.unit1.gn2.weight
primals_13 = self.body.block1.unit1.gn2.bias
primals_11 = self.body.block1.unit1.conv2.weight
primals_6 = self.body.block1.unit1.gn3.weight
primals_7 = self.body.block1.unit1.gn3.bias
primals_5 = self.body.block1.unit1.conv3.weight
primals_14 = self.body.block1.unit1.downsample.weight
primals_15 = self.body.block1.unit1.gn_proj.weight
primals_16 = self.body.block1.unit1.gn_proj.bias
primals_18 = self.body.block1.unit2.gn1.weight
primals_19 = self.body.block1.unit2.gn1.bias
primals_17 = self.body.block1.unit2.conv1.weight
primals_21 = self.body.block1.unit2.gn2.weight
primals_22 = self.body.block1.unit2.gn2.bias
primals_20 = self.body.block1.unit2.conv2.weight
primals_24 = self.body.block1.unit2.gn3.weight
primals_25 = self.body.block1.unit2.gn3.bias
primals_23 = self.body.block1.unit2.conv3.weight
primals_27 = self.body.block1.unit3.gn1.weight
primals_28 = self.body.block1.unit3.gn1.bias
primals_26 = self.body.block1.unit3.conv1.weight
primals_30 = self.body.block1.unit3.gn2.weight
primals_31 = self.body.block1.unit3.gn2.bias
primals_29 = self.body.block1.unit3.conv2.weight
primals_33 = self.body.block1.unit3.gn3.weight
primals_34 = self.body.block1.unit3.gn3.bias
primals_32 = self.body.block1.unit3.conv3.weight
primals_36 = self.body.block1.unit4.gn1.weight
primals_37 = self.body.block1.unit4.gn1.bias
primals_35 = self.body.block1.unit4.conv1.weight
primals_39 = self.body.block1.unit4.gn2.weight
primals_40 = self.body.block1.unit4.gn2.bias
primals_38 = self.body.block1.unit4.conv2.weight
primals_42 = self.body.block1.unit4.gn3.weight
primals_43 = self.body.block1.unit4.gn3.bias
primals_41 = self.body.block1.unit4.conv3.weight
primals_48 = self.body.block2.unit1.gn1.weight
primals_49 = self.body.block2.unit1.gn1.bias
primals_47 = self.body.block2.unit1.conv1.weight
primals_51 = self.body.block2.unit1.gn2.weight
primals_52 = self.body.block2.unit1.gn2.bias
primals_50 = self.body.block2.unit1.conv2.weight
primals_45 = self.body.block2.unit1.gn3.weight
primals_46 = self.body.block2.unit1.gn3.bias
primals_53 = self.body.block2.unit1.conv3.weight
primals_44 = self.body.block2.unit1.downsample.weight
primals_54 = self.body.block2.unit1.gn_proj.weight
primals_55 = self.body.block2.unit1.gn_proj.bias
primals_57 = self.body.block2.unit2.gn1.weight
primals_58 = self.body.block2.unit2.gn1.bias
primals_56 = self.body.block2.unit2.conv1.weight
primals_60 = self.body.block2.unit2.gn2.weight
primals_61 = self.body.block2.unit2.gn2.bias
primals_59 = self.body.block2.unit2.conv2.weight
primals_63 = self.body.block2.unit2.gn3.weight
primals_64 = self.body.block2.unit2.gn3.bias
primals_62 = self.body.block2.unit2.conv3.weight
primals_66 = self.body.block2.unit3.gn1.weight
primals_67 = self.body.block2.unit3.gn1.bias
primals_65 = self.body.block2.unit3.conv1.weight
primals_69 = self.body.block2.unit3.gn2.weight
primals_70 = self.body.block2.unit3.gn2.bias
primals_68 = self.body.block2.unit3.conv2.weight
primals_72 = self.body.block2.unit3.gn3.weight
primals_73 = self.body.block2.unit3.gn3.bias
primals_71 = self.body.block2.unit3.conv3.weight
primals_75 = self.body.block2.unit4.gn1.weight
primals_76 = self.body.block2.unit4.gn1.bias
primals_74 = self.body.block2.unit4.conv1.weight
primals_78 = self.body.block2.unit4.gn2.weight
primals_79 = self.body.block2.unit4.gn2.bias
primals_77 = self.body.block2.unit4.conv2.weight
primals_81 = self.body.block2.unit4.gn3.weight
primals_82 = self.body.block2.unit4.gn3.bias
primals_80 = self.body.block2.unit4.conv3.weight
primals_87 = self.body.block3.unit1.gn1.weight
primals_88 = self.body.block3.unit1.gn1.bias
primals_86 = self.body.block3.unit1.conv1.weight
primals_90 = self.body.block3.unit1.gn2.weight
primals_91 = self.body.block3.unit1.gn2.bias
primals_89 = self.body.block3.unit1.conv2.weight
primals_84 = self.body.block3.unit1.gn3.weight
primals_85 = self.body.block3.unit1.gn3.bias
primals_92 = self.body.block3.unit1.conv3.weight
primals_83 = self.body.block3.unit1.downsample.weight
primals_93 = self.body.block3.unit1.gn_proj.weight
primals_94 = self.body.block3.unit1.gn_proj.bias
primals_96 = self.body.block3.unit2.gn1.weight
primals_97 = self.body.block3.unit2.gn1.bias
primals_95 = self.body.block3.unit2.conv1.weight
primals_99 = self.body.block3.unit2.gn2.weight
primals_100 = self.body.block3.unit2.gn2.bias
primals_98 = self.body.block3.unit2.conv2.weight
primals_102 = self.body.block3.unit2.gn3.weight
primals_103 = self.body.block3.unit2.gn3.bias
primals_101 = self.body.block3.unit2.conv3.weight
primals_105 = self.body.block3.unit3.gn1.weight
primals_106 = self.body.block3.unit3.gn1.bias
primals_104 = self.body.block3.unit3.conv1.weight
primals_108 = self.body.block3.unit3.gn2.weight
primals_109 = self.body.block3.unit3.gn2.bias
primals_107 = self.body.block3.unit3.conv2.weight
primals_111 = self.body.block3.unit3.gn3.weight
primals_112 = self.body.block3.unit3.gn3.bias
primals_110 = self.body.block3.unit3.conv3.weight
primals_114 = self.body.block3.unit4.gn1.weight
primals_115 = self.body.block3.unit4.gn1.bias
primals_113 = self.body.block3.unit4.conv1.weight
primals_117 = self.body.block3.unit4.gn2.weight
primals_118 = self.body.block3.unit4.gn2.bias
primals_116 = self.body.block3.unit4.conv2.weight
primals_120 = self.body.block3.unit4.gn3.weight
primals_121 = self.body.block3.unit4.gn3.bias
primals_119 = self.body.block3.unit4.conv3.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121])
return output[0]
|
Yifanfanfanfan/ViT-pytorch
|
ResNetV2
| false
| 12309
|
[
"MIT"
] | 0
|
0f975aa7d3fd0aba6f74260c2b5a91786f1211ba
|
https://github.com/Yifanfanfanfan/ViT-pytorch/tree/0f975aa7d3fd0aba6f74260c2b5a91786f1211ba
|
NegSamplingLoss
|
import torch
import torch.nn as nn
class NegSamplingLoss(nn.Module):
def __init__(self):
super(NegSamplingLoss, self).__init__()
def forward(self, score, sign):
return -torch.mean(torch.sigmoid(sign * score))
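# Illustrative usage sketch (added example, not part of the original entry):
# sign is typically +1 for observed pairs and -1 for sampled negatives, so
# minimizing the loss pushes positive scores up and negative scores down.
def _neg_sampling_example():
    loss_fn = NegSamplingLoss()
    score = torch.randn(8)
    sign = torch.tensor([1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0])
    return loss_fn(score, sign)  # scalar tensor in (-1, 0)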
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_neg_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = -tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_neg_sigmoid_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class NegSamplingLossNew(nn.Module):
def __init__(self):
super(NegSamplingLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
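# Hypothetical equivalence check (added sketch; assumes a CUDA device and the
# exact contiguous [4, 4, 4, 4] inputs the size/stride guards above expect):
# the fused kernel should match the eager formula -mean(sigmoid(sign * score)).
def _check_neg_sampling_equivalence():
    if not torch.cuda.is_available():
        return None
    score = torch.rand(4, 4, 4, 4, device='cuda')
    sign = torch.rand(4, 4, 4, 4, device='cuda')
    eager = -torch.mean(torch.sigmoid(sign * score))
    fused = NegSamplingLossNew()(score, sign)
    assert torch.allclose(eager, fused, atol=1e-5)
    return fused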
|
dreaming-qin/RecBole
|
NegSamplingLoss
| false
| 12310
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
InnerProductLoss
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class InnerProductLoss(nn.Module):
"""This is the inner-product loss used in CFKG for optimization.
"""
def __init__(self):
super(InnerProductLoss, self).__init__()
def forward(self, anchor, positive, negative):
pos_score = torch.mul(anchor, positive).sum(dim=1)
neg_score = torch.mul(anchor, negative).sum(dim=1)
return (F.softplus(-pos_score) + F.softplus(neg_score)).mean()
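# Illustrative usage sketch (added example; the 2D shapes are assumptions):
# scores are inner products taken along dim=1, so the loss rewards a large
# anchor-positive score and penalizes a large anchor-negative score.
def _inner_product_loss_example():
    loss_fn = InnerProductLoss()
    anchor = torch.rand(8, 16)
    positive = torch.rand(8, 16)
    negative = torch.rand(8, 16)
    return loss_fn(anchor, positive, negative)  # scalar tensor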
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
tmp21 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = -tmp14
tmp17 = tmp0 * tmp16
tmp19 = tmp3 * tmp18
tmp20 = tmp17 + tmp19
tmp22 = tmp7 * tmp21
tmp23 = tmp20 + tmp22
tmp25 = tmp11 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = 20.0
tmp28 = tmp15 > tmp27
tmp29 = tl_math.exp(tmp15)
tmp30 = libdevice.log1p(tmp29)
tmp31 = tl.where(tmp28, tmp15, tmp30)
tmp32 = tmp26 > tmp27
tmp33 = tl_math.exp(tmp26)
tmp34 = libdevice.log1p(tmp33)
tmp35 = tl.where(tmp32, tmp26, tmp34)
tmp36 = tmp31 + tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.sum(tmp37, 1)[:, None]
tmp40 = 64.0
tmp41 = tmp39 / tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_mean_mul_neg_softplus_sum_0[grid(1)](buf3,
arg1_1, arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class InnerProductLossNew(nn.Module):
"""This is the inner-product loss used in CFKG for optimization.
"""
def __init__(self):
super(InnerProductLossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
|
dreaming-qin/RecBole
|
InnerProductLoss
| false
| 12311
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
InnerProductLayer
|
import torch
import torch.nn as nn
class InnerProductLayer(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
"""
def __init__(self, num_feature_field, device):
"""
Args:
            num_feature_field (int): number of feature fields.
            device (torch.device): device object of the model.
"""
super(InnerProductLayer, self).__init__()
self.num_feature_field = num_feature_field
        self.device = device
def forward(self, feat_emb):
"""
Args:
            feat_emb (torch.FloatTensor): 3D tensor with shape: [batch_size, num_feature_field, embedding_size].
        Returns:
            inner_product (torch.FloatTensor): The inner product of the input tensor, with shape [batch_size, num_pairs].
"""
row = []
col = []
for i in range(self.num_feature_field - 1):
for j in range(i + 1, self.num_feature_field):
row.append(i)
col.append(j)
p = feat_emb[:, row]
q = feat_emb[:, col]
inner_product = p * q
return inner_product.sum(dim=-1)
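# Hypothetical usage sketch (added example; batch size and embedding size are
# assumptions): with num_feature_field=4 there are 4*(4-1)/2 = 6 field pairs,
# so a [batch_size, 4, embedding_size] input reduces to [batch_size, 6].
def _inner_product_layer_example():
    layer = InnerProductLayer(num_feature_field=4, device='cpu')
    feat_emb = torch.rand(8, 4, 16)
    return layer(feat_emb).shape  # torch.Size([8, 6])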
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_feature_field': 4, 'device': 0}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x0 = xindex % 4
x2 = xindex // 24
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tl.full([1], 0, tl.int64)
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.full([1], 5, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.where(tmp13, tmp3, tmp5)
tmp15 = tl.where(tmp11, tmp3, tmp14)
tmp16 = tl.where(tmp2, tmp9, tmp15)
tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp16 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp18 = tl.where(tmp6, tmp5, tmp1)
tmp19 = tl.where(tmp4, tmp3, tmp18)
tmp20 = tl.where(tmp13, tmp1, tmp1)
tmp21 = tl.where(tmp11, tmp5, tmp20)
tmp22 = tl.where(tmp2, tmp19, tmp21)
tmp23 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp22 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp24 = tmp17 * tmp23
tmp25 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp27 = tmp25 * tmp26
tmp28 = tmp24 + tmp27
tmp29 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp31 = tmp29 * tmp30
tmp32 = tmp28 + tmp31
tmp33 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp35 = tmp33 * tmp34
tmp36 = tmp32 + tmp35
tl.store(out_ptr0 + x3, tmp36, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class InnerProductLayerNew(nn.Module):
"""InnerProduct Layer used in PNN that compute the element-wise
product or inner product between feature vectors.
"""
def __init__(self, num_feature_field, device):
"""
Args:
num_feature_field(int) :number of feature fields.
device(torch.device) : device object of the model.
"""
super(InnerProductLayerNew, self).__init__()
self.num_feature_field = num_feature_field
        self.to(device)  # move the layer to the requested device
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dreaming-qin/RecBole
|
InnerProductLayer
| false
| 12,312
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
ConvNCFBPRLoss
|
import torch
import torch.nn as nn
class ConvNCFBPRLoss(nn.Module):
""" ConvNCFBPRLoss, based on Bayesian Personalized Ranking,
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = ConvNCFBPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self):
super(ConvNCFBPRLoss, self).__init__()
def forward(self, pos_score, neg_score):
distance = pos_score - neg_score
loss = torch.sum(torch.log(1 + torch.exp(-distance)))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
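# Numeric sanity check (added sketch with assumed values): the BPR loss equals the sum of
# softplus applied to the negated score differences.
import torch.nn.functional as F
pos = torch.tensor([1.0, 2.0])
neg = torch.tensor([0.5, 2.5])
loss = ConvNCFBPRLoss()(pos, neg)
print(torch.allclose(loss, F.softplus(-(pos - neg)).sum()))  # True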
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = -tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = 1.0
tmp6 = tmp4 + tmp5
tmp7 = tl_math.log(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_exp_log_neg_sub_sum_0[grid(1)](arg0_1, arg1_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ConvNCFBPRLossNew(nn.Module):
""" ConvNCFBPRLoss, based on Bayesian Personalized Ranking,
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = ConvNCFBPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self):
super(ConvNCFBPRLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
dreaming-qin/RecBole
|
ConvNCFBPRLoss
| false
| 12,313
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
BaseFactorizationMachine
|
import torch
import torch.nn as nn
class BaseFactorizationMachine(nn.Module):
"""Calculate FM result over the embeddings
Args:
reduce_sum: bool, whether to sum the result, default is True.
Input:
input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``.
    Output:
        output: tensor, A 2D tensor with shape: ``(batch_size, 1)`` or ``(batch_size, embed_dim)``.
"""
def __init__(self, reduce_sum=True):
super(BaseFactorizationMachine, self).__init__()
self.reduce_sum = reduce_sum
def forward(self, input_x):
square_of_sum = torch.sum(input_x, dim=1) ** 2
sum_of_square = torch.sum(input_x ** 2, dim=1)
output = square_of_sum - sum_of_square
if self.reduce_sum:
output = torch.sum(output, dim=1, keepdim=True)
output = 0.5 * output
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
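# Sanity check (added sketch; shapes assumed): 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) equals the
# sum of all pairwise element-wise products v_i * v_j with i < j.
x = torch.rand(2, 3, 4)                                    # [batch, field, embed]
fm_out = BaseFactorizationMachine(reduce_sum=False)(x)     # [batch, embed]
brute = sum(x[:, i] * x[:, j] for i in range(3) for j in range(i + 1, 3))
print(torch.allclose(fm_out, brute, atol=1e-6))            # True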
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp0 * tmp0
tmp9 = tmp1 * tmp1
tmp10 = tmp8 + tmp9
tmp11 = tmp3 * tmp3
tmp12 = tmp10 + tmp11
tmp13 = tmp5 * tmp5
tmp14 = tmp12 + tmp13
tmp15 = tmp7 - tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp16 * tmp16
tmp25 = tmp17 * tmp17
tmp26 = tmp24 + tmp25
tmp27 = tmp19 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = tmp21 * tmp21
tmp30 = tmp28 + tmp29
tmp31 = tmp23 - tmp30
tmp32 = tmp15 + tmp31
tmp35 = tmp33 + tmp34
tmp37 = tmp35 + tmp36
tmp39 = tmp37 + tmp38
tmp40 = tmp39 * tmp39
tmp41 = tmp33 * tmp33
tmp42 = tmp34 * tmp34
tmp43 = tmp41 + tmp42
tmp44 = tmp36 * tmp36
tmp45 = tmp43 + tmp44
tmp46 = tmp38 * tmp38
tmp47 = tmp45 + tmp46
tmp48 = tmp40 - tmp47
tmp49 = tmp32 + tmp48
tmp52 = tmp50 + tmp51
tmp54 = tmp52 + tmp53
tmp56 = tmp54 + tmp55
tmp57 = tmp56 * tmp56
tmp58 = tmp50 * tmp50
tmp59 = tmp51 * tmp51
tmp60 = tmp58 + tmp59
tmp61 = tmp53 * tmp53
tmp62 = tmp60 + tmp61
tmp63 = tmp55 * tmp55
tmp64 = tmp62 + tmp63
tmp65 = tmp57 - tmp64
tmp66 = tmp49 + tmp65
tmp67 = 0.5
tmp68 = tmp66 * tmp67
tl.store(in_out_ptr0 + x2, tmp68, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
class BaseFactorizationMachineNew(nn.Module):
"""Calculate FM result over the embeddings
Args:
reduce_sum: bool, whether to sum the result, default is True.
Input:
input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``.
    Output:
        output: tensor, A 2D tensor with shape: ``(batch_size, 1)`` or ``(batch_size, embed_dim)``.
"""
def __init__(self, reduce_sum=True):
super(BaseFactorizationMachineNew, self).__init__()
self.reduce_sum = reduce_sum
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dreaming-qin/RecBole
|
BaseFactorizationMachine
| false
| 12,314
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
AGRUCell
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AGRUCell(nn.Module):
    r"""Attention based GRU (AGRU). AGRU uses the attention score to replace the update gate of GRU,
    and changes the hidden state directly.
    Formally:
        ..math: {h}_{t}^{\prime}=\left(1-a_{t}\right) * {h}_{t-1}^{\prime}+a_{t} * \tilde{{h}}_{t}^{\prime}
    :math:`{h}_{t}^{\prime}` and :math:`{h}_{t-1}^{\prime}` are the new and previous hidden states,
    and :math:`\tilde{{h}}_{t}^{\prime}` is the candidate hidden state of AGRU.
    """
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)
)
if self.bias:
self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input, hidden_output, att_score):
gi = F.linear(input, self.weight_ih, self.bias_ih)
gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
i_r, _i_u, i_h = gi.chunk(3, 1)
h_r, _h_u, h_h = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
new_state = torch.tanh(i_h + reset_gate * h_h)
att_score = att_score.view(-1, 1)
hy = (1 - att_score) * hidden_output + att_score * new_state
return hy
def get_inputs():
return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
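# Usage sketch (added; sizes assumed): one AGRU step where the attention score replaces
# the update gate.
cell = AGRUCell(input_size=4, hidden_size=4)
x = torch.rand(16, 4)            # current input
h_prev = torch.rand(16, 4)       # previous hidden state
att = torch.rand(16)             # attention scores, reshaped to (-1, 1) inside forward
h_next = cell(x, h_prev, att)
print(h_next.shape)              # torch.Size([16, 4])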
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x2, xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = 1.0
tmp8 = tmp7 - tmp6
tmp10 = tmp8 * tmp9
tmp13 = tmp11 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp6 * tmp17
tmp19 = tmp10 + tmp18
tmp20 = tmp17 * tmp17
tmp21 = tmp7 - tmp20
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp19, xmask)
tl.store(out_ptr2 + x2, tmp21, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)](
buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1,
(16, 4), (12, 1), 8), buf2, buf4
class AGRUCellNew(nn.Module):
    r"""Attention based GRU (AGRU). AGRU uses the attention score to replace the update gate of GRU,
    and changes the hidden state directly.
    Formally:
        ..math: {h}_{t}^{\prime}=\left(1-a_{t}\right) * {h}_{t-1}^{\prime}+a_{t} * \tilde{{h}}_{t}^{\prime}
    :math:`{h}_{t}^{\prime}` and :math:`{h}_{t-1}^{\prime}` are the new and previous hidden states,
    and :math:`\tilde{{h}}_{t}^{\prime}` is the candidate hidden state of AGRU.
    """
def __init__(self, input_size, hidden_size, bias=True):
super(AGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)
)
if self.bias:
self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_5 = self.bias_hh
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
dreaming-qin/RecBole
|
AGRUCell
| false
| 12,315
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
RegLoss
|
import torch
import torch.nn as nn
class RegLoss(nn.Module):
""" RegLoss, L2 regularization on model parameters
"""
def __init__(self):
super(RegLoss, self).__init__()
def forward(self, parameters):
reg_loss = None
for W in parameters:
if reg_loss is None:
reg_loss = W.norm(2)
else:
reg_loss = reg_loss + W.norm(2)
return reg_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
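# Usage sketch (added; toy model assumed): the loss is the sum of L2 norms of all parameters.
toy = nn.Linear(4, 2)
reg = RegLoss()(toy.parameters())
print(reg)   # scalar tensor: ||weight||_2 + ||bias||_2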
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr0 + (64 + r0), None)
tmp10 = tl.load(in_ptr0 + (128 + r0), None)
tmp15 = tl.load(in_ptr0 + (192 + r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = libdevice.sqrt(tmp4)
tmp21 = libdevice.sqrt(tmp9)
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp14)
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp19)
tmp26 = tmp24 + tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_0[grid(1)](buf4, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf4,
class RegLossNew(nn.Module):
""" RegLoss, L2 regularization on model parameters
"""
def __init__(self):
super(RegLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
dreaming-qin/RecBole
|
RegLoss
| false
| 12,316
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
AttLayer
|
import torch
import torch.nn as nn
import torch.nn.functional as fn
class AttLayer(nn.Module):
"""Calculate the attention signal(weight) according the input tensor.
Args:
infeatures (torch.FloatTensor): A 3D input tensor with shape of[batch_size, M, embed_dim].
Returns:
torch.FloatTensor: Attention weight of input. shape of [batch_size, M].
"""
def __init__(self, in_dim, att_dim):
super(AttLayer, self).__init__()
self.in_dim = in_dim
self.att_dim = att_dim
self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
bias=False)
self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)
def forward(self, infeatures):
att_signal = self.w(infeatures)
att_signal = fn.relu(att_signal)
att_signal = torch.mul(att_signal, self.h)
att_signal = torch.sum(att_signal, dim=2)
att_signal = fn.softmax(att_signal, dim=1)
return att_signal
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'att_dim': 4}]
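# Usage sketch (added; sizes assumed): the weights are normalized along the M dimension.
att = AttLayer(in_dim=8, att_dim=16)
x = torch.rand(2, 5, 8)          # [batch_size, M, embed_dim]
weights = att(x)                 # [batch_size, M]
print(weights.shape, weights.sum(dim=1))   # torch.Size([2, 5]), rows summing to 1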
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 * tmp3
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = tmp6 * tmp3
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp10 * tmp3
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = tmp14 * tmp3
tmp16 = tmp12 + tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_relu_sum_0[grid(64)](buf0, primals_3, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, buf3
class AttLayerNew(nn.Module):
"""Calculate the attention signal(weight) according the input tensor.
Args:
infeatures (torch.FloatTensor): A 3D input tensor with shape of[batch_size, M, embed_dim].
Returns:
torch.FloatTensor: Attention weight of input. shape of [batch_size, M].
"""
def __init__(self, in_dim, att_dim):
super(AttLayerNew, self).__init__()
self.in_dim = in_dim
self.att_dim = att_dim
self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
bias=False)
self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)
def forward(self, input_0):
primals_3 = self.h
primals_1 = self.w.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
dreaming-qin/RecBole
|
AttLayer
| false
| 12,317
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
Repeat_Explore_Mechanism
|
import torch
import torch.nn as nn
class Repeat_Explore_Mechanism(nn.Module):
def __init__(self, device, hidden_size, seq_len, dropout_prob):
super(Repeat_Explore_Mechanism, self).__init__()
self.dropout = nn.Dropout(dropout_prob)
self.hidden_size = hidden_size
self.device = device
self.seq_len = seq_len
self.Wre = nn.Linear(hidden_size, hidden_size, bias=False)
self.Ure = nn.Linear(hidden_size, hidden_size, bias=False)
self.tanh = nn.Tanh()
self.Vre = nn.Linear(hidden_size, 1, bias=False)
self.Wcre = nn.Linear(hidden_size, 2, bias=False)
def forward(self, all_memory, last_memory):
"""
        Calculate the probabilities of the repeat and explore modes.
"""
all_memory_values = all_memory
all_memory = self.dropout(self.Ure(all_memory))
last_memory = self.dropout(self.Wre(last_memory))
last_memory = last_memory.unsqueeze(1)
last_memory = last_memory.repeat(1, self.seq_len, 1)
output_ere = self.tanh(all_memory + last_memory)
output_ere = self.Vre(output_ere)
alpha_are = nn.Softmax(dim=1)(output_ere)
alpha_are = alpha_are.repeat(1, 1, self.hidden_size)
output_cre = alpha_are * all_memory_values
output_cre = output_cre.sum(dim=1)
output_cre = self.Wcre(output_cre)
repeat_explore_mechanism = nn.Softmax(dim=-1)(output_cre)
return repeat_explore_mechanism
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'device': 0, 'hidden_size': 4, 'seq_len': 4,
'dropout_prob': 0.5}]
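# Usage sketch (added; sizes assumed): returns a [batch_size, 2] distribution over the
# repeat and explore modes.
mech = Repeat_Explore_Mechanism(device=torch.device('cpu'), hidden_size=4, seq_len=4,
    dropout_prob=0.0)
all_memory = torch.rand(2, 4, 4)     # [batch_size, seq_len, hidden_size]
last_memory = torch.rand(2, 4)       # [batch_size, hidden_size]
print(mech(all_memory, last_memory).shape)   # torch.Size([2, 2])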
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (2, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_repeat_tanh_0[grid(64)](buf2, buf1, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
del buf1
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0)
del buf3
triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
del buf4
triton_poi_fused_mul_repeat_sum_3[grid(16)](buf5, primals_1, buf6,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 2), (1, 4
), 0), out=buf7)
buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused__softmax_4[grid(8)](buf7, buf8, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del buf7
return (buf8, primals_1, primals_4, buf2, buf5, buf6, buf8, primals_6,
primals_5)
class Repeat_Explore_MechanismNew(nn.Module):
def __init__(self, device, hidden_size, seq_len, dropout_prob):
super(Repeat_Explore_MechanismNew, self).__init__()
self.dropout = nn.Dropout(dropout_prob)
self.hidden_size = hidden_size
self.device = device
self.seq_len = seq_len
self.Wre = nn.Linear(hidden_size, hidden_size, bias=False)
self.Ure = nn.Linear(hidden_size, hidden_size, bias=False)
self.tanh = nn.Tanh()
self.Vre = nn.Linear(hidden_size, 1, bias=False)
self.Wcre = nn.Linear(hidden_size, 2, bias=False)
def forward(self, input_0, input_1):
primals_2 = self.Wre.weight
primals_3 = self.Ure.weight
primals_5 = self.Vre.weight
primals_6 = self.Wcre.weight
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
dreaming-qin/RecBole
|
Repeat_Explore_Mechanism
| false
| 12,318
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
ItemToInterestAggregation
|
import torch
import torch.nn as nn
class ItemToInterestAggregation(nn.Module):
def __init__(self, seq_len, hidden_size, k_interests=5):
super().__init__()
self.k_interests = k_interests
self.theta = nn.Parameter(torch.randn([hidden_size, k_interests]))
def forward(self, input_tensor):
D_matrix = torch.matmul(input_tensor, self.theta)
D_matrix = nn.Softmax(dim=-2)(D_matrix)
result = torch.einsum('nij, nik -> nkj', input_tensor, D_matrix)
return result
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4, 'hidden_size': 4}]
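# Usage sketch (added; sizes assumed): a length-6 sequence is aggregated into k_interests=3
# interest vectors per sample.
agg = ItemToInterestAggregation(seq_len=6, hidden_size=8, k_interests=3)
seq = torch.rand(2, 6, 8)            # [batch_size, seq_len, hidden_size]
print(agg(seq).shape)                # torch.Size([2, 3, 8])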
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = xindex // 20
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 5
x2 = xindex // 20
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (5 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (10 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (15 + x0 + 20 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 5), (5, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 5), (5, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(80)](buf0, buf1, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused__softmax_1[grid(80)](buf1, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1,
4), 0), buf2, out=buf3)
del buf2
return reinterpret_tensor(buf3, (4, 5, 4), (20, 1, 5), 0), primals_2, buf0
class ItemToInterestAggregationNew(nn.Module):
def __init__(self, seq_len, hidden_size, k_interests=5):
super().__init__()
self.k_interests = k_interests
self.theta = nn.Parameter(torch.randn([hidden_size, k_interests]))
def forward(self, input_0):
primals_1 = self.theta
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
dreaming-qin/RecBole
|
ItemToInterestAggregation
| false
| 12,319
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
AUGRUCell
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AUGRUCell(nn.Module):
    r"""Effect of GRU with attentional update gate (AUGRU). AUGRU combines attention mechanism and GRU seamlessly.
    Formally:
        ..math: \tilde{{u}}_{t}^{\prime}=a_{t} * {u}_{t}^{\prime}
                {h}_{t}^{\prime}=\left(1-\tilde{{u}}_{t}^{\prime}\right) \circ {h}_{t-1}^{\prime}+\tilde{{u}}_{t}^{\prime} \circ \tilde{{h}}_{t}^{\prime}
    """
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)
)
if bias:
self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input, hidden_output, att_score):
gi = F.linear(input, self.weight_ih, self.bias_ih)
gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
i_r, i_u, i_h = gi.chunk(3, 1)
h_r, h_u, h_h = gh.chunk(3, 1)
reset_gate = torch.sigmoid(i_r + h_r)
update_gate = torch.sigmoid(i_u + h_u)
new_state = torch.tanh(i_h + reset_gate * h_h)
att_score = att_score.view(-1, 1)
update_gate = att_score * update_gate
hy = (1 - update_gate) * hidden_output + update_gate * new_state
return hy
def get_inputs():
return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
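# Usage sketch (added; sizes assumed): one AUGRU step where the attention score scales the
# update gate before the convex combination of old and candidate states.
cell = AUGRUCell(input_size=4, hidden_size=4)
x = torch.rand(8, 4)
h_prev = torch.rand(8, 4)
att = torch.rand(8)                  # reshaped to (-1, 1) inside forward
print(cell(x, h_prev, att).shape)    # torch.Size([8, 4])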
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
tmp19 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = tl.sigmoid(tmp10)
tmp14 = tmp12 + tmp13
tmp16 = tmp11 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.tanh(tmp17)
tmp20 = tmp19 * tmp5
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp20 * tmp18
tmp26 = tmp24 + tmp25
tl.store(out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr2 + x2, tmp18, xmask)
tl.store(out_ptr3 + x2, tmp26, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (64, 4), (4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (64, 4), (4, 1))
assert_size_stride(primals_7, (16, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(
primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0,
primals_2, buf1, primals_7, primals_6, buf3, buf2, buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_6, primals_7, reinterpret_tensor(buf1,
(64, 4), (12, 1), 8), buf2, buf3, buf4
class AUGRUCellNew(nn.Module):
    r"""Effect of GRU with attentional update gate (AUGRU). AUGRU combines attention mechanism and GRU seamlessly.
    Formally:
        ..math: \tilde{{u}}_{t}^{\prime}=a_{t} * {u}_{t}^{\prime}
                {h}_{t}^{\prime}=\left(1-\tilde{{u}}_{t}^{\prime}\right) \circ {h}_{t-1}^{\prime}+\tilde{{u}}_{t}^{\prime} \circ \tilde{{h}}_{t}^{\prime}
    """
def __init__(self, input_size, hidden_size, bias=True):
super(AUGRUCellNew, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)
)
if bias:
self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight_ih
primals_4 = self.weight_hh
primals_2 = self.bias_ih
primals_5 = self.bias_hh
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
dreaming-qin/RecBole
|
AUGRUCell
| false
| 12,320
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
ComplexLinear
|
from torch.nn import Module
import torch
from torch.nn import Linear
class ComplexLinear(Module):
def __init__(self, in_features, out_features):
super(ComplexLinear, self).__init__()
self.fc_r = Linear(in_features, out_features)
self.fc_i = Linear(in_features, out_features)
def forward(self, input_r, input_i):
return self.fc_r(input_r) - self.fc_i(input_i), self.fc_r(input_i
) + self.fc_i(input_r)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
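# Sanity check (added sketch; sizes assumed): with the biases zeroed, the two outputs match the
# real and imaginary parts of a genuine complex matmul (the layer applies separate real and
# imaginary biases, so the check removes them first).
with torch.no_grad():
    lin = ComplexLinear(in_features=3, out_features=2)
    lin.fc_r.bias.zero_()
    lin.fc_i.bias.zero_()
    xr, xi = torch.rand(5, 3), torch.rand(5, 3)
    yr, yi = lin(xr, xi)
    ref = torch.complex(xr, xi) @ torch.complex(lin.fc_r.weight, lin.fc_i.weight).T
    print(torch.allclose(yr, ref.real, atol=1e-5), torch.allclose(yi, ref.imag, atol=1e-5))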
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3)
del primals_1
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
get_raw_stream(0)
triton_poi_fused_add_sub_0[grid(256)](buf2, buf5, primals_2, buf1,
primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0)
class ComplexLinearNew(Module):
def __init__(self, in_features, out_features):
super(ComplexLinearNew, self).__init__()
self.fc_r = Linear(in_features, out_features)
self.fc_i = Linear(in_features, out_features)
def forward(self, input_0, input_1):
primals_1 = self.fc_r.weight
primals_2 = self.fc_r.bias
primals_4 = self.fc_i.weight
primals_5 = self.fc_i.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
drydenwiebe/complexPyTorch
|
ComplexLinear
| false
| 12,321
|
[
"MIT"
] | 0
|
cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
https://github.com/drydenwiebe/complexPyTorch/tree/cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
BinaryClassificationHead
|
from _paritybench_helpers import _mock_config
import torch
class BinaryClassificationHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.out_proj = torch.nn.Linear(config.hidden_size, 1)
def init_weights(self):
self.dense.weight.data.normal_(mean=0.0, std=self.config.
initializer_range)
if self.dense.bias is not None:
self.dense.bias.data.zero_()
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
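# Usage sketch (added; config values assumed): scores the first token of each sequence.
from types import SimpleNamespace
cfg = SimpleNamespace(hidden_size=8, hidden_dropout_prob=0.0, initializer_range=0.02)
head = BinaryClassificationHead(cfg)
features = torch.rand(4, 10, 8)      # [batch_size, seq_len, hidden_size]
print(head(features).shape)          # torch.Size([4, 1])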
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_5
return reinterpret_tensor(buf4, (4, 4, 1), (4, 1, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4
class BinaryClassificationHeadNew(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
self.out_proj = torch.nn.Linear(config.hidden_size, 1)
def init_weights(self):
self.dense.weight.data.normal_(mean=0.0, std=self.config.
initializer_range)
if self.dense.bias is not None:
self.dense.bias.data.zero_()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
BunnyNoBugs/DeepPavlov
|
BinaryClassificationHead
| false
| 12,322
|
[
"Apache-2.0"
] | 0
|
b2213db633a669d27d6f745dd780530574ccf8b5
|
https://github.com/BunnyNoBugs/DeepPavlov/tree/b2213db633a669d27d6f745dd780530574ccf8b5
|
ComplexConv2d
|
from torch.nn import Module
import torch
from torch.nn import Conv2d
class ComplexConv2d(Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(ComplexConv2d, self).__init__()
self.conv_r = Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
self.conv_i = Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
def forward(self, input_r, input_i):
return self.conv_r(input_r) - self.conv_i(input_i), self.conv_r(input_i
) + self.conv_i(input_r)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
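# Usage sketch (added; sizes assumed): the real and imaginary planes are convolved with
# separate real/imaginary kernels and recombined by the complex product rule.
conv = ComplexConv2d(in_channels=2, out_channels=3, kernel_size=3, padding=1)
xr, xi = torch.rand(1, 2, 8, 8), torch.rand(1, 2, 8, 8)
yr, yi = conv(xr, xi)
print(yr.shape, yi.shape)            # torch.Size([1, 3, 8, 8]) twice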
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import Conv2d
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x3, xmask)
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(in_out_ptr1 + x3, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf3 = extern_kernels.convolution(primals_6, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 2, 2), (16, 4, 2, 1))
buf4 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf0
del buf0
buf5 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused_add_convolution_sub_0[grid(64)](buf2, buf5,
primals_2, buf1, primals_5, buf4, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, primals_1, primals_3, primals_4, primals_6
class ComplexConv2dNew(Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(ComplexConv2dNew, self).__init__()
self.conv_r = Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
self.conv_i = Conv2d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
def forward(self, input_0, input_1):
primals_1 = self.conv_r.weight
primals_2 = self.conv_r.bias
primals_4 = self.conv_i.weight
primals_5 = self.conv_i.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
drydenwiebe/complexPyTorch
|
ComplexConv2d
| false
| 12,323
|
[
"MIT"
] | 0
|
cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
https://github.com/drydenwiebe/complexPyTorch/tree/cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
Transition
|
import torch
import torch.nn as nn
import torch.nn.parallel
class Transition(nn.Module):
def __init__(self, in_features, out_features, act_layer=nn.GELU):
super(Transition, self).__init__()
self.act = act_layer()
self.linear = nn.Linear(in_features, out_features)
def forward(self, x):
x = self.linear(x)
x = self.act(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
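# Usage sketch (added; sizes assumed): a linear projection followed by the activation.
trans = Transition(in_features=4, out_features=8)
print(trans(torch.rand(2, 4)).shape)   # torch.Size([2, 8])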
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
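    # Added comment: exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2)));
    # the constant 0.7071067811865476 is 1 / sqrt(2).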
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class TransitionNew(nn.Module):
def __init__(self, in_features, out_features, act_layer=nn.GELU):
super(TransitionNew, self).__init__()
self.act = act_layer()
self.linear = nn.Linear(in_features, out_features)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
druzhkov-paul/T2T-ViT
|
Transition
| false
| 12,324
|
[
"BSD-3-Clause-Clear"
] | 0
|
819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c
|
https://github.com/druzhkov-paul/T2T-ViT/tree/819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c
|
_TestNetStrided
|
import torch
import torch.nn.functional as F
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class _TestNetStrided(torch.nn.Module):
def __init__(self):
super(_TestNetStrided, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = torch.nn.Conv2d(20, 50, kernel_size=5, stride=(2, 2))
self.fc1 = torch.nn.Linear(200, 500)
self.fc2 = torch.nn.Linear(500, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = x.view(-1, 200)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
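# Added shape walk-through (illustration only): for a [4, 1, 64, 64] input the feature maps go
# 64 -> 60 (conv1) -> 30 (pool) -> 13 (conv2, stride 2) -> 6 (pool), i.e. 50 * 6 * 6 = 1800
# features per sample. The view(-1, 200) therefore regroups the 4 * 1800 = 7200 values into
# 36 rows of 200, which matches the (36, 10) output produced by the compiled call() below.
# The helper name _strided_shape_example is hypothetical.
def _strided_shape_example():
    net = _TestNetStrided()
    out = net(torch.rand(4, 1, 64, 64))
    assert out.shape == (36, 10)
    return out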
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 288000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 72000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = xindex // 30
x2 = xindex // 18000
x4 = xindex % 18000
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x4 + 18048 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 33800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 50
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x5 = xindex // 36
x3 = xindex // 1800
x4 = xindex % 1800
tmp0 = tl.load(in_ptr0 + (2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (13 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (14 + 2 * x0 + 26 * x1 + 169 * x5), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x4 + 1920 * x3), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 1824 * x3), tmp18, xmask)
tl.store(out_ptr2 + (x4 + 1920 * x3), tmp20, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1824 * (x0 // 1800) + x0 % 1800), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 18000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
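    # Added comment: numerically stable log-softmax per row, (x - max) - log(sum(exp(x - max))).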
xnumel = 36
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (50, 20, 5, 5), (500, 25, 5, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (500, 200), (200, 1))
assert_size_stride(primals_7, (500,), (1,))
assert_size_stride(primals_8, (10, 500), (500, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(288000)](buf1, primals_2,
288000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 20, 30, 30), (18048, 900, 30, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 20, 30, 30), (18000, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_1[grid(72000)](buf1,
buf2, buf3, 72000, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 50, 13, 13), (8450, 169, 13, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(33800)](buf5, primals_5, 33800,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.int8)
buf7 = empty_strided_cuda((4, 50, 6, 6), (1824, 36, 6, 1), torch.
float32)
buf15 = empty_strided_cuda((4, 50, 6, 6), (1920, 36, 6, 1), torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_3[grid
(7200)](buf5, buf6, buf7, buf15, 7200, XBLOCK=256, num_warps=4,
num_stages=1)
buf8 = empty_strided_cuda((36, 200), (200, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_view_4[grid(7200)](buf7,
buf8, 7200, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((36, 500), (500, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (200, 500), (
1, 200), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(18000)](buf10, primals_7, 18000,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8,
(500, 10), (1, 500), 0), alpha=1, beta=1, out=buf11)
del primals_9
buf14 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(36)](buf11, buf14, 36, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf11
return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf14, primals_8, primals_6, buf15)
class _TestNetStridedNew(torch.nn.Module):
def __init__(self):
super(_TestNetStridedNew, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 20, kernel_size=5)
self.conv2 = torch.nn.Conv2d(20, 50, kernel_size=5, stride=(2, 2))
self.fc1 = torch.nn.Linear(200, 500)
self.fc2 = torch.nn.Linear(500, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
arjunsuresh/aimet
|
_TestNetStrided
| false
| 12,325
|
[
"BSD-3-Clause"
] | 0
|
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
Concat
|
import torch
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class Concat(torch.nn.Module):
""" Concat module for a functional concat"""
def __init__(self, axis: 'int'=0):
super(Concat, self).__init__()
self.axis = axis
def forward(self, x, y):
"""
Forward-pass routine for divide op
"""
return torch.cat((x, y), self.axis)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
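# Added usage sketch (illustration only): Concat simply forwards to torch.cat along self.axis,
# so concatenating two [4, 4, 4, 4] tensors along axis 0 yields an [8, 4, 4, 4] tensor.
# The helper name _concat_usage_example is hypothetical.
def _concat_usage_example():
    x = torch.rand(4, 4, 4, 4)
    y = torch.rand(4, 4, 4, 4)
    cat = Concat(axis=0)
    out = cat(x, y)
    assert out.shape == (8, 4, 4, 4)
    # equivalent to calling torch.cat directly
    assert torch.equal(out, torch.cat((x, y), 0))
    return out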
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-4 + x1)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ConcatNew(torch.nn.Module):
""" Concat module for a functional concat"""
def __init__(self, axis: 'int'=0):
super(ConcatNew, self).__init__()
self.axis = axis
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
arjunsuresh/aimet
|
Concat
| false
| 12,326
|
[
"BSD-3-Clause"
] | 0
|
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
EdgeCaseModel
|
import torch
from typing import Any
import torch.nn as nn
class LayerWithRidiculouslyLongNameAndDoesntDoAnything(nn.Module):
""" Model with a very long name. """
def __init__(self) ->None:
super().__init__()
self.identity = nn.Identity()
def forward(self, x: 'Any') ->Any:
return self.identity(x)
class EdgeCaseModel(nn.Module):
""" Model that throws an exception when used. """
def __init__(self, throw_error: 'bool'=False, return_str: 'bool'=False,
return_class: 'bool'=False) ->None:
super().__init__()
self.throw_error = throw_error
self.return_str = return_str
self.return_class = return_class
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.model = LayerWithRidiculouslyLongNameAndDoesntDoAnything()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x = self.conv1(x)
x = self.model('string output' if self.return_str else x)
if self.throw_error:
x = self.conv1(x)
if self.return_class:
x = self.model(EdgeCaseModel)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
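# Added usage sketch (illustration only): with the default flags the model is a plain 5x5
# convolution; with throw_error=True the second self.conv1 call receives a 10-channel tensor
# and raises a RuntimeError, which is the edge case being exercised. The helper name
# _edge_case_example is hypothetical.
def _edge_case_example():
    x = torch.rand(4, 1, 64, 64)
    ok = EdgeCaseModel()(x)
    assert ok.shape == (4, 10, 60, 60)
    try:
        EdgeCaseModel(throw_error=True)(x)
    except RuntimeError:
        pass  # expected: conv1 expects 1 input channel but receives 10
    return ok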
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Any
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class LayerWithRidiculouslyLongNameAndDoesntDoAnything(nn.Module):
""" Model with a very long name. """
def __init__(self) ->None:
super().__init__()
self.identity = nn.Identity()
def forward(self, x: 'Any') ->Any:
return self.identity(x)
class EdgeCaseModelNew(nn.Module):
""" Model that throws an exception when used. """
def __init__(self, throw_error: 'bool'=False, return_str: 'bool'=False,
return_class: 'bool'=False) ->None:
super().__init__()
self.throw_error = throw_error
self.return_str = return_str
self.return_class = return_class
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.model = LayerWithRidiculouslyLongNameAndDoesntDoAnything()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
e-dorigatti/torchinfo
|
EdgeCaseModel
| false
| 12,327
|
[
"MIT"
] | 0
|
9fa0e677fb7002e89afd5b1bb372fe8c1dd813d6
|
https://github.com/e-dorigatti/torchinfo/tree/9fa0e677fb7002e89afd5b1bb372fe8c1dd813d6
|
ComplexConvTranspose2d
|
from torch.nn import Module
import torch
from torch.nn import ConvTranspose2d
class ComplexConvTranspose2d(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True, dilation=1,
padding_mode='zeros'):
super(ComplexConvTranspose2d, self).__init__()
self.conv_tran_r = ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding, output_padding, groups, bias,
dilation, padding_mode)
self.conv_tran_i = ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding, output_padding, groups, bias,
dilation, padding_mode)
def forward(self, input_r, input_i):
return self.conv_tran_r(input_r) - self.conv_tran_i(input_i
), self.conv_tran_r(input_i) + self.conv_tran_i(input_r)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
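# Added note (illustration only): the two returned tensors are the real and imaginary parts of a
# complex transposed convolution,
#   real = conv_tran_r(x_r) - conv_tran_i(x_i)
#   imag = conv_tran_r(x_i) + conv_tran_i(x_r)
# A 4x4 kernel with stride 1 and no padding upsamples a 4x4 map to 7x7, matching the shapes
# asserted in the compiled code below. The helper name _complex_conv_transpose_example is hypothetical.
def _complex_conv_transpose_example():
    m = ComplexConvTranspose2d(in_channels=4, out_channels=4, kernel_size=4)
    x_r = torch.rand(4, 4, 4, 4)
    x_i = torch.rand(4, 4, 4, 4)
    real, imag = m(x_r, x_i)
    assert real.shape == (4, 4, 7, 7) and imag.shape == (4, 4, 7, 7)
    return real, imag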
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import ConvTranspose2d
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_sub_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x3, xmask)
tmp9 = tl.load(in_ptr3 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x3, tmp6, xmask)
tl.store(in_out_ptr1 + x3, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1))
buf3 = extern_kernels.convolution(primals_6, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 7, 7), (196, 49, 7, 1))
buf4 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 7, 7), (196, 49, 7, 1))
buf2 = buf0
del buf0
buf5 = buf3
del buf3
get_raw_stream(0)
triton_poi_fused_add_convolution_sub_0[grid(784)](buf2, buf5,
primals_2, buf1, primals_5, buf4, 784, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, primals_1, primals_3, primals_4, primals_6
class ComplexConvTranspose2dNew(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True, dilation=1,
padding_mode='zeros'):
super(ComplexConvTranspose2dNew, self).__init__()
self.conv_tran_r = ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding, output_padding, groups, bias,
dilation, padding_mode)
self.conv_tran_i = ConvTranspose2d(in_channels, out_channels,
kernel_size, stride, padding, output_padding, groups, bias,
dilation, padding_mode)
def forward(self, input_0, input_1):
primals_1 = self.conv_tran_r.weight
primals_2 = self.conv_tran_r.bias
primals_3 = self.conv_tran_i.weight
primals_5 = self.conv_tran_i.bias
primals_4 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
drydenwiebe/complexPyTorch
|
ComplexConvTranspose2d
| false
| 12,328
|
[
"MIT"
] | 0
|
cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
https://github.com/drydenwiebe/complexPyTorch/tree/cea88ba7ee5692dfa1b40f0ba609ef14160d5073
|
OuterProductLayer
|
import torch
import torch.nn as nn
class OuterProductLayer(nn.Module):
"""OuterProduct Layer used in PNN. This implementation is
adapted from code that the author of the paper published on https://github.com/Atomu2014/product-nets.
"""
def __init__(self, num_feature_field, embedding_size, device):
"""
Args:
num_feature_field(int) :number of feature fields.
embedding_size(int) :number of embedding size.
device(torch.device) : device object of the model.
"""
super(OuterProductLayer, self).__init__()
self.num_feature_field = num_feature_field
num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
embed_size = embedding_size
self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
embed_size), requires_grad=True)
nn.init.xavier_uniform_(self.kernel)
        self.to(device)
def forward(self, feat_emb):
"""
Args:
            feat_emb(torch.FloatTensor): 3D tensor with shape [batch_size, num_feature_field, embedding_size].
Returns:
            outer_product(torch.FloatTensor): The kernel-weighted outer product of the input tensor, with shape [batch_size, num_pairs].
"""
row = []
col = []
for i in range(self.num_feature_field - 1):
for j in range(i + 1, self.num_feature_field):
row.append(i)
col.append(j)
p = feat_emb[:, row]
q = feat_emb[:, col]
p.unsqueeze_(dim=1)
p = torch.mul(p, self.kernel.unsqueeze(0))
p = torch.sum(p, dim=-1)
p = torch.transpose(p, 2, 1)
outer_product = p * q
return outer_product.sum(dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_feature_field': 4, 'embedding_size': 4, 'device': 0}]
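# Added usage sketch (illustration only): with num_feature_field=4 there are 4 * 3 / 2 = 6 field
# pairs, so the layer maps [batch_size, num_feature_field, embedding_size] feature embeddings to a
# [batch_size, 6] tensor of kernel-weighted outer products. The helper name
# _outer_product_example is hypothetical; a CPU device is used here for simplicity.
def _outer_product_example():
    layer = OuterProductLayer(num_feature_field=4, embedding_size=4,
                              device=torch.device('cpu'))
    feat_emb = torch.rand(4, 4, 4)  # [batch_size, num_feature_field, embedding_size]
    out = layer(feat_emb)
    assert out.shape == (4, 6)      # one score per field pair
    return out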
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x2 = xindex // 24
x3 = xindex % 24
x4 = xindex
tmp18 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp0 = x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = tl.full([1], 0, tl.int64)
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.full([1], 5, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.where(tmp13, tmp3, tmp5)
tmp15 = tl.where(tmp11, tmp3, tmp14)
tmp16 = tl.where(tmp2, tmp9, tmp15)
tmp17 = tl.load(in_ptr0 + (4 * tmp16 + 16 * x2), xmask, eviction_policy
='evict_last')
tmp19 = tmp17 * tmp18
tmp20 = tl.load(in_ptr0 + (1 + 4 * tmp16 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp22 = tmp20 * tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.load(in_ptr0 + (2 + 4 * tmp16 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp26 = tmp24 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tl.load(in_ptr0 + (3 + 4 * tmp16 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp28 * tmp29
tmp31 = tmp27 + tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
@triton.jit
def triton_poi_fused_index_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (6 + x0 + 24 * x1), xmask)
tmp23 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask)
tmp27 = tl.load(in_ptr0 + (18 + x0 + 24 * x1), xmask)
tmp1 = x0
tmp2 = tl.full([1], 3, tl.int64)
tmp3 = tmp1 < tmp2
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp1 < tmp6
tmp8 = tl.where(tmp7, tmp6, tmp2)
tmp9 = tl.where(tmp5, tmp4, tmp8)
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tmp1 < tmp10
tmp12 = tl.full([1], 5, tl.int64)
tmp13 = tmp1 < tmp12
tmp14 = tl.where(tmp13, tmp2, tmp2)
tmp15 = tl.where(tmp11, tmp6, tmp14)
tmp16 = tl.where(tmp3, tmp9, tmp15)
tmp17 = tl.load(in_ptr1 + (4 * tmp16 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp18 = tmp0 * tmp17
tmp20 = tl.load(in_ptr1 + (1 + 4 * tmp16 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp24 = tl.load(in_ptr1 + (2 + 4 * tmp16 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp28 = tl.load(in_ptr1 + (3 + 4 * tmp16 + 16 * x1), xmask,
eviction_policy='evict_last')
tmp29 = tmp27 * tmp28
tmp30 = tmp26 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 6, 4), (24, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(96)](primals_1, primals_2, buf0, 96,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
triton_poi_fused_index_mul_sum_1[grid(24)](buf0, primals_1, buf1,
24, XBLOCK=32, num_warps=1, num_stages=1)
del buf0
return buf1, primals_1
class OuterProductLayerNew(nn.Module):
"""OuterProduct Layer used in PNN. This implementation is
adapted from code that the author of the paper published on https://github.com/Atomu2014/product-nets.
"""
def __init__(self, num_feature_field, embedding_size, device):
"""
Args:
num_feature_field(int) :number of feature fields.
embedding_size(int) :number of embedding size.
device(torch.device) : device object of the model.
"""
super(OuterProductLayerNew, self).__init__()
self.num_feature_field = num_feature_field
num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
embed_size = embedding_size
self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
embed_size), requires_grad=True)
nn.init.xavier_uniform_(self.kernel)
        self.to(device)
def forward(self, input_0):
primals_2 = self.kernel
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
dreaming-qin/RecBole
|
OuterProductLayer
| false
| 12,329
|
[
"MIT"
] | 0
|
d6de39521484ded60c387ca604abaf86310acdbe
|
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
|
Net
|
import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self, input_placeholder, output_size):
super(Net, self).__init__()
self.fc1 = nn.Linear(input_placeholder, 255)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(255, 255)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(255, output_size)
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_placeholder': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 255
x2 = xindex // 4080
x3 = xindex % 4080
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 4096 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 4096 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 255
x1 = xindex // 255
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 255 * (x1 % 16) + 4096 * (x1 // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (255, 4), (4, 1))
assert_size_stride(primals_2, (255,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (255, 255), (255, 1))
assert_size_stride(primals_5, (255,), (1,))
assert_size_stride(primals_6, (4, 255), (255, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 255), (255, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 255), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 255), (4096, 1020, 255, 1),
torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 255), (4096, 1020, 255, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16320)](buf0,
primals_2, buf1, buf8, 16320, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_2
buf2 = buf0
del buf0
triton_poi_fused_relu_view_1[grid(16320)](buf1, buf2, 16320, XBLOCK
=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 255), (255, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (255, 255), (
1, 255), 0), out=buf3)
buf4 = buf1
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 255), (4096, 1020, 255, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16320)](buf3,
primals_5, buf4, buf7, 16320, XBLOCK=256, num_warps=4, num_stages=1
)
del primals_5
buf5 = buf3
del buf3
triton_poi_fused_relu_view_1[grid(16320)](buf4, buf5, 16320, XBLOCK
=256, num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(255, 4), (1, 255), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf7, primals_4, buf8
class NetNew(nn.Module):
def __init__(self, input_placeholder, output_size):
super(NetNew, self).__init__()
self.fc1 = nn.Linear(input_placeholder, 255)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(255, 255)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(255, output_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
dylan-albertazzi/Berkely_DeepRL
|
Net
| false
| 12,330
|
[
"MIT"
] | 0
|
997d066df7b429f6ad365dca8105490dae8f978e
|
https://github.com/dylan-albertazzi/Berkely_DeepRL/tree/997d066df7b429f6ad365dca8105490dae8f978e
|
MPNetAttention
|
from _paritybench_helpers import _mock_config
import math
import torch
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class MPNetSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.q = nn.Linear(config.hidden_size, self.all_head_size)
self.k = nn.Linear(config.hidden_size, self.all_head_size)
self.v = nn.Linear(config.hidden_size, self.all_head_size)
self.o = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
position_bias=None, output_attentions=False, **kwargs):
q = self.q(hidden_states)
k = self.k(hidden_states)
v = self.v(hidden_states)
q = self.transpose_for_scores(q)
k = self.transpose_for_scores(k)
v = self.transpose_for_scores(v)
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if position_bias is not None:
attention_scores += position_bias
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
c = torch.matmul(attention_probs, v)
c = c.permute(0, 2, 1, 3).contiguous()
new_c_shape = c.size()[:-2] + (self.all_head_size,)
c = c.view(*new_c_shape)
o = self.o(c)
outputs = (o, attention_probs) if output_attentions else (o,)
return outputs
class MPNetAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = MPNetSelfAttention(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attn.
num_attention_heads, self.attn.attention_head_size, self.
pruned_heads)
self.attn.q = prune_linear_layer(self.attn.q, index)
self.attn.k = prune_linear_layer(self.attn.k, index)
self.attn.v = prune_linear_layer(self.attn.v, index)
self.attn.o = prune_linear_layer(self.attn.o, index, dim=1)
self.attn.num_attention_heads = self.attn.num_attention_heads - len(
heads)
self.attn.all_head_size = (self.attn.attention_head_size * self.
attn.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
position_bias=None, output_attentions=False, **kwargs):
self_outputs = self.attn(hidden_states, attention_mask, head_mask,
position_bias, output_attentions=output_attentions)
attention_output = self.LayerNorm(self.dropout(self_outputs[0]) +
hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, layer_norm_eps=1,
hidden_dropout_prob=0.5)}]
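# Added usage sketch (illustration only): pruning head 0 of 4 keeps the indices of the remaining
# 3 heads, so each of q/k/v shrinks from 4 to 3 output features and the output projection o
# shrinks along its input dimension. The helper name _prune_heads_example is hypothetical and
# reuses the _mock_config imported above.
def _prune_heads_example():
    config = _mock_config(hidden_size=4, num_attention_heads=4,
                          attention_probs_dropout_prob=0.5, layer_norm_eps=1,
                          hidden_dropout_prob=0.5)
    attn = MPNetAttention(config)
    attn.prune_heads([0])
    assert attn.attn.num_attention_heads == 3
    assert attn.attn.q.weight.shape == (3, 4)   # 3 heads of size 1 remain
    assert attn.attn.o.weight.shape == (4, 3)   # o pruned along dim=1
    out, = attn(torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 4)
    return out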
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from typing import List
from typing import Tuple
from torch import nn
from typing import Set
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_9, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_9
buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf11, primals_3,
buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](buf11, primals_3,
buf12, buf13, primals_10, primals_11, buf14, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf12
del buf13
del primals_11
return buf14, primals_3, primals_10, buf7, reinterpret_tensor(buf8, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), buf11, primals_8
def find_pruneable_heads_and_indices(heads: 'List[int]', n_heads: 'int',
head_size: 'int', already_pruned_heads: 'Set[int]') ->Tuple[Set[int],
torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads
for head in heads:
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: 'torch.LongTensor' = torch.arange(len(mask))[mask].long()
return heads, index
def prune_linear_layer(layer: 'nn.Linear', index: 'torch.LongTensor', dim:
'int'=0) ->nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
class MPNetSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if (config.hidden_size % config.num_attention_heads != 0 and not
hasattr(config, 'embedding_size')):
raise ValueError(
f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})'
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.q = nn.Linear(config.hidden_size, self.all_head_size)
self.k = nn.Linear(config.hidden_size, self.all_head_size)
self.v = nn.Linear(config.hidden_size, self.all_head_size)
self.o = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None,
position_bias=None, output_attentions=False, **kwargs):
q = self.q(hidden_states)
k = self.k(hidden_states)
v = self.v(hidden_states)
q = self.transpose_for_scores(q)
k = self.transpose_for_scores(k)
v = self.transpose_for_scores(v)
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
if position_bias is not None:
attention_scores += position_bias
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
c = torch.matmul(attention_probs, v)
c = c.permute(0, 2, 1, 3).contiguous()
new_c_shape = c.size()[:-2] + (self.all_head_size,)
c = c.view(*new_c_shape)
o = self.o(c)
outputs = (o, attention_probs) if output_attentions else (o,)
return outputs
class MPNetAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = MPNetSelfAttention(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attn.
num_attention_heads, self.attn.attention_head_size, self.
pruned_heads)
self.attn.q = prune_linear_layer(self.attn.q, index)
self.attn.k = prune_linear_layer(self.attn.k, index)
self.attn.v = prune_linear_layer(self.attn.v, index)
self.attn.o = prune_linear_layer(self.attn.o, index, dim=1)
self.attn.num_attention_heads = self.attn.num_attention_heads - len(
heads)
self.attn.all_head_size = (self.attn.attention_head_size * self.
attn.num_attention_heads)
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_0):
primals_1 = self.attn.q.weight
primals_2 = self.attn.q.bias
primals_4 = self.attn.k.weight
primals_5 = self.attn.k.bias
primals_6 = self.attn.v.weight
primals_7 = self.attn.v.bias
primals_8 = self.attn.o.weight
primals_9 = self.attn.o.bias
primals_10 = self.LayerNorm.weight
primals_11 = self.LayerNorm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
|
Clemens123/transformers
|
MPNetAttention
| false
| 12,331
|
[
"Apache-2.0"
] | 0
|
22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
|
https://github.com/Clemens123/transformers/tree/22abe7bbc587c16ec30f9d1aa549dcbeba6e9e26
|
NoiseLayer
|
import torch
import torch.nn as nn
class NoiseLayer(nn.Module):
"""adds noise. noise is per pixel (constant over channels) with per-channel weight"""
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(channels))
self.noise = None
def forward(self, x, noise=None):
if noise is None and self.noise is None:
noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device=
x.device, dtype=x.dtype)
elif noise is None:
noise = self.noise
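        # The learned per-channel weight scales the shared per-pixel noise before it is added.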
x = x + self.weight.view(1, -1, 1, 1) * noise
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
|
import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
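    # x1 selects the per-channel weight, x2 the batch; the noise tensor has a single
    # channel, so it is indexed by (x0 + 16 * x2).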
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf2, buf1
class NoiseLayerNew(nn.Module):
"""adds noise. noise is per pixel (constant over channels) with per-channel weight"""
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(channels))
self.noise = None
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
eitanrich/ganspace-manifold
|
NoiseLayer
| false
| 12,332
|
[
"Apache-2.0"
] | 0
|
148d5d30001c43794a40bbed885601e7816f5d7d
|
https://github.com/eitanrich/ganspace-manifold/tree/148d5d30001c43794a40bbed885601e7816f5d7d
|
KnowledgeDistillationLoss
|
import torch
from torch import nn
class KnowledgeDistillationLoss(nn.Module):
def __init__(self, reduction='mean', alpha=1.0):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, inputs, targets, mask=None):
inputs = inputs.narrow(1, 0, targets.shape[1])
outputs = torch.log_softmax(inputs, dim=1)
labels = torch.softmax(targets * self.alpha, dim=1)
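        # Soft cross-entropy between teacher probabilities (labels) and student
        # log-probabilities (outputs), averaged over the class dim and negated below.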
loss = (outputs * labels).mean(dim=1)
if mask is not None:
loss = loss * mask.float()
if self.reduction == 'mean':
outputs = -torch.mean(loss)
elif self.reduction == 'sum':
outputs = -torch.sum(loss)
else:
outputs = -loss
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused__log_softmax__softmax_mul_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + x3, xmask)
tmp15 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp18 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp20 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp21 = tmp19 + tmp20
tmp22 = tmp14 / tmp21
tmp23 = tmp13 * tmp22
tl.store(out_ptr0 + x3, tmp23, xmask)
@triton.jit
def triton_per_fused_mean_neg_3(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 64.0
tmp13 = tmp11 / tmp12
tmp14 = -tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](arg1_1, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax__softmax_mul_2[grid(256)](buf0, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_neg_3[grid(1)](buf4, buf2, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf2
return buf4,
class KnowledgeDistillationLossNew(nn.Module):
def __init__(self, reduction='mean', alpha=1.0):
super().__init__()
self.reduction = reduction
self.alpha = alpha
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
edoardofantolino/MLDLproject4
|
KnowledgeDistillationLoss
| false
| 12,333
|
[
"MIT"
] | 0
|
fed0cfd51f5984bbf21205a43ea43dc49f4d289a
|
https://github.com/edoardofantolino/MLDLproject4/tree/fed0cfd51f5984bbf21205a43ea43dc49f4d289a
|
SubpixelConvolutionLayer
|
import torch
import torch.nn as nn
class SubpixelConvolutionLayer(nn.Module):
def __init__(self, channels: 'int'=64) ->None:
"""
Args:
channels (int): Number of channels in the input image. (Default: 64)
"""
super(SubpixelConvolutionLayer, self).__init__()
self.conv = nn.Conv2d(channels, channels * 4, kernel_size=3, stride
=1, padding=1)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor=2)
self.prelu = nn.PReLU()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
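        # The 3x3 conv expands channels 4x; PixelShuffle folds them into a 2x spatial
        # upscale; PReLU activates the result.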
out = self.conv(x)
out = self.pixel_shuffle(out)
out = self.prelu(out)
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused__prelu_kernel_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 128
x2 = xindex // 16384 % 64
x3 = xindex // 1048576
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (x1 % 2) + 4 * x2 + 256 * (x0 // 2) +
16384 * (x1 // 2) + 1048576 * x3 + x0 % 2), None)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x4, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (256, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16384, 9)](primals_1, buf0, 16384, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
torch.float32)
triton_poi_fused_1[grid(256, 4096)](primals_3, buf1, 256, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(4194304)](buf3, primals_2,
4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 64, 128, 128), (1048576, 16384, 128,
1), torch.float32)
triton_poi_fused__prelu_kernel_3[grid(4194304)](buf3, primals_4,
buf4, 4194304, XBLOCK=512, num_warps=8, num_stages=1)
return buf4, buf0, buf1, primals_4, buf3
class SubpixelConvolutionLayerNew(nn.Module):
def __init__(self, channels: 'int'=64) ->None:
"""
Args:
channels (int): Number of channels in the input image. (Default: 64)
"""
super(SubpixelConvolutionLayerNew, self).__init__()
self.conv = nn.Conv2d(channels, channels * 4, kernel_size=3, stride
=1, padding=1)
self.pixel_shuffle = nn.PixelShuffle(upscale_factor=2)
self.prelu = nn.PReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.prelu.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
duylebkHCM/Anime-Face-Generator-
|
SubpixelConvolutionLayer
| false
| 12,334
|
[
"MIT"
] | 0
|
ffcbe22f2073971e81b1bbc61b7ef7970889f8a2
|
https://github.com/duylebkHCM/Anime-Face-Generator-/tree/ffcbe22f2073971e81b1bbc61b7ef7970889f8a2
|
RecursiveNet
|
import torch
from typing import Any
import torch.nn as nn
class RecursiveNet(nn.Module):
""" Model that uses a layer recursively in computation. """
def __init__(self) ->None:
super().__init__()
self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)
def forward(self, x: 'torch.Tensor', args1: 'Any'=None, args2: 'Any'=None
) ->torch.Tensor:
del args1, args2
out = x
for _ in range(3):
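            # conv1 is applied twice per iteration, six times in total.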
out = self.conv1(out)
out = self.conv1(out)
return out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1048576)](buf1, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(1048576)](buf3, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_0[grid(1048576)](buf5, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_0[grid(1048576)](buf7, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_0[grid(1048576)](buf9, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf11 = buf10
del buf10
triton_poi_fused_convolution_0[grid(1048576)](buf11, primals_3,
1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
return buf11, primals_1, primals_2, buf1, buf3, buf5, buf7, buf9
class RecursiveNetNew(nn.Module):
""" Model that uses a layer recursively in computation. """
def __init__(self) ->None:
super().__init__()
self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
e-dorigatti/torchinfo
|
RecursiveNet
| false
| 12,335
|
[
"MIT"
] | 0
|
9fa0e677fb7002e89afd5b1bb372fe8c1dd813d6
|
https://github.com/e-dorigatti/torchinfo/tree/9fa0e677fb7002e89afd5b1bb372fe8c1dd813d6
|
MyLinear
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
False, lrmul=1, bias=True):
super().__init__()
he_std = gain * input_size ** -0.5
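        # With use_wscale, weights start at std 1/lrmul and the equalized-lr scale
        # (he_std * lrmul) is applied at runtime via w_mul instead of at init.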
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(torch.randn(output_size,
input_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
return F.linear(x, self.weight * self.w_mul, bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MyLinearNew(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
False, lrmul=1, bias=True):
super().__init__()
he_std = gain * input_size ** -0.5
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(torch.randn(output_size,
input_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, input_0):
primals_2 = self.weight
primals_1 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
eitanrich/ganspace-manifold
|
MyLinear
| false
| 12,336
|
[
"Apache-2.0"
] | 0
|
148d5d30001c43794a40bbed885601e7816f5d7d
|
https://github.com/eitanrich/ganspace-manifold/tree/148d5d30001c43794a40bbed885601e7816f5d7d
|
BCELoss2d
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class BCELoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
"""
        Implements Binary Cross Entropy loss function.
"""
super(BCELoss2d, self).__init__()
self.bce_loss = nn.BCELoss(weight, size_average)
def forward(self, logits, targets):
probs = F.sigmoid(logits)
probs_flat = probs.view(-1)
targets_flat = targets.view(-1)
return self.bce_loss(probs_flat, targets_flat)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
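    # Elementwise BCE: -(t*log(p) + (1-t)*log(1-p)) with the log terms clamped at -100
    # (as in torch's BCELoss), then reduced to the mean over all 256 elements.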
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCELoss2dNew(nn.Module):
def __init__(self, weight=None, size_average=True):
"""
        Implements Binary Cross Entropy loss function.
"""
super(BCELoss2dNew, self).__init__()
self.bce_loss = nn.BCELoss(weight, size_average)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ekalyashov/segmentation-unet
|
BCELoss2d
| false
| 12,337
|
[
"MIT"
] | 0
|
59dc95419481b2535a52332e0be92b15c7450674
|
https://github.com/ekalyashov/segmentation-unet/tree/59dc95419481b2535a52332e0be92b15c7450674
|
StyleMod
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
False, lrmul=1, bias=True):
super().__init__()
he_std = gain * input_size ** -0.5
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(torch.randn(output_size,
input_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
return F.linear(x, self.weight * self.w_mul, bias)
class StyleMod(nn.Module):
def __init__(self, latent_size, channels, use_wscale):
super(StyleMod, self).__init__()
self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale
=use_wscale)
def forward(self, x, latent):
style = self.lin(latent)
shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
style = style.view(shape)
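        # style[:, 0] is a per-channel scale (shifted by +1), style[:, 1] a per-channel bias.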
x = x * (style[:, 0] + 1.0) + style[:, 1]
return x
def get_inputs():
return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 4
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last')
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tmp1 + tmp4
tmp6 = tmp5 + tmp3
tmp7 = tmp0 * tmp6
tmp10 = tmp9 * tmp3
tmp11 = tmp8 + tmp10
tmp12 = tmp7 + tmp11
tl.store(out_ptr0 + x3, tmp12, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8,), (1,))
assert_size_stride(primals_2, (8, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1,
buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_1
return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=
False, lrmul=1, bias=True):
super().__init__()
he_std = gain * input_size ** -0.5
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(torch.randn(output_size,
input_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
return F.linear(x, self.weight * self.w_mul, bias)
class StyleModNew(nn.Module):
def __init__(self, latent_size, channels, use_wscale):
super(StyleModNew, self).__init__()
self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale
=use_wscale)
def forward(self, input_0, input_1):
primals_2 = self.lin.weight
primals_1 = self.lin.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
eitanrich/ganspace-manifold
|
StyleMod
| false
| 12,338
|
[
"Apache-2.0"
] | 0
|
148d5d30001c43794a40bbed885601e7816f5d7d
|
https://github.com/eitanrich/ganspace-manifold/tree/148d5d30001c43794a40bbed885601e7816f5d7d
|
ProteinResNetPooler
|
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class ProteinResNetPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.attention_weights = nn.Linear(config.hidden_size, 1)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states, mask=None):
attention_scores = self.attention_weights(hidden_states)
if mask is not None:
attention_scores += -10000.0 * (1 - mask)
attention_weights = torch.softmax(attention_scores, -1)
weighted_mean_embedding = torch.matmul(hidden_states.transpose(1, 2
), attention_weights).squeeze(2)
pooled_output = self.dense(weighted_mean_embedding)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
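    # Softmax over the trailing size-1 dimension is constant 1.0; the subtract/exp/divide
    # below simply materialize that value.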
tmp1 = tmp0 - tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp2 / tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_3, (4, 4, 4), (16, 1,
4), 0), buf2, out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_tanh_1[grid(16)](buf5, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
return buf5, primals_3, buf1, reinterpret_tensor(buf3, (4, 4), (4, 1), 0
), buf5, primals_4
class ProteinResNetPoolerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.attention_weights = nn.Linear(config.hidden_size, 1)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, input_0):
primals_1 = self.attention_weights.weight
primals_2 = self.attention_weights.bias
primals_4 = self.dense.weight
primals_5 = self.dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
ekvall93/tape
|
ProteinResNetPooler
| false
| 12,339
|
[
"BSD-3-Clause"
] | 0
|
1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
https://github.com/ekvall93/tape/tree/1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
Accuracy
|
import torch
import torch.nn as nn
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
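        # Positions whose label equals ignore_index contribute to neither the
        # numerator nor the denominator.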
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class Accuracy(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, inputs, target):
return accuracy(inputs, target, self.ignore_index)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tl.store(out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_per_fused__to_copy_div_eq_mul_ne_sum_1(in_ptr0, in_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
r2 = rindex
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + r2, None)
tmp1 = tmp0.to(tl.float32)
tmp3 = tmp1 == tmp2
tmp4 = -100.0
tmp5 = tmp2 != tmp4
tmp6 = tmp3 & tmp5
tmp7 = tmp6.to(tl.int64)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = tmp5.to(tl.int64)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp10.to(tl.float32)
tmp16 = tmp14.to(tl.float32)
tmp17 = tmp15 / tmp16
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_div_eq_mul_ne_sum_1[grid(1)](buf0, arg1_1,
buf3, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf3,
def accuracy(logits, labels, ignore_index: 'int'=-100):
with torch.no_grad():
valid_mask = labels != ignore_index
predictions = logits.float().argmax(-1)
correct = (predictions == labels) * valid_mask
return correct.sum().float() / valid_mask.sum().float()
class AccuracyNew(nn.Module):
def __init__(self, ignore_index: 'int'=-100):
super().__init__()
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ekvall93/tape
|
Accuracy
| false
| 12,340
|
[
"BSD-3-Clause"
] | 0
|
1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
https://github.com/ekvall93/tape/tree/1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
DNNModel
|
import torch
from torch import nn
class DNNModel(nn.Module):
def __init__(self, dropout=0.2):
super(DNNModel, self).__init__()
self.fc1 = nn.Linear(4, 4)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout)
self.fc2 = nn.Linear(4, 4)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout)
self.fc3 = nn.Linear(4, 4)
self.relu3 = nn.ReLU()
self.dropout3 = nn.Dropout(p=dropout)
self.relu_last = nn.ReLU()
self.fc_last = nn.Linear(4, 3)
def forward(self, x):
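        # Note: dropout1-3 and relu_last are defined above but not applied in this forward pass.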
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.relu3(self.fc3(x))
x = self.fc_last(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (3, 4), (4, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 3), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 3), (48, 12, 3, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class DNNModelNew(nn.Module):
def __init__(self, dropout=0.2):
super(DNNModelNew, self).__init__()
self.fc1 = nn.Linear(4, 4)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=dropout)
self.fc2 = nn.Linear(4, 4)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(p=dropout)
self.fc3 = nn.Linear(4, 4)
self.relu3 = nn.ReLU()
self.dropout3 = nn.Dropout(p=dropout)
self.relu_last = nn.ReLU()
self.fc_last = nn.Linear(4, 3)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc_last.weight
primals_9 = self.fc_last.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ehsangolshani/workload-to-metric-mapper
|
DNNModel
| false
| 12,341
|
[
"Apache-2.0"
] | 0
|
4c2825696200748382247909f2f777f49bf62cf0
|
https://github.com/ehsangolshani/workload-to-metric-mapper/tree/4c2825696200748382247909f2f777f49bf62cf0
|
SoftDiceLoss
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class SoftDiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True):
"""
        Implements Dice loss function (using the Sørensen–Dice coefficient).
"""
super(SoftDiceLoss, self).__init__()
def forward(self, logits, targets):
num = targets.size(0)
probs = F.sigmoid(logits)
m1 = probs.view(num, -1)
m2 = targets.view(num, -1)
intersection = m1 * m2
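        # Per-sample Dice score: (2*sum(m1*m2) + 1) / (sum(m1) + sum(m2) + 1);
        # the +1 terms smooth the empty-mask case.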
score = (2.0 * intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
score = 1 - score.sum() / num
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp3 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class SoftDiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True):
"""
        Implements Dice loss function (using the Sørensen–Dice coefficient).
"""
super(SoftDiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ekalyashov/segmentation-unet
|
SoftDiceLoss
| false
| 12,342
|
[
"MIT"
] | 0
|
59dc95419481b2535a52332e0be92b15c7450674
|
https://github.com/ekalyashov/segmentation-unet/tree/59dc95419481b2535a52332e0be92b15c7450674
|
LeNet_300_100
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class LeNet_300_100(nn.Module):
"""Simple NN with hidden layers [300, 100]
Based on https://github.com/mi-lad/snip/blob/master/train.py
by Milad Alizadeh.
"""
def __init__(self, save_features=None, bench_model=False):
super(LeNet_300_100, self).__init__()
self.fc1 = nn.Linear(28 * 28, 300, bias=True)
self.fc2 = nn.Linear(300, 100, bias=True)
self.fc3 = nn.Linear(100, 10, bias=True)
self.mask = None
def forward(self, x):
x0 = x.view(-1, 28 * 28)
x1 = F.relu(self.fc1(x0))
x2 = F.relu(self.fc2(x1))
x3 = self.fc3(x2)
return F.log_softmax(x3, dim=1)
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 300
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_2(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (300, 784), (784, 1))
assert_size_stride(primals_3, (300,), (1,))
assert_size_stride(primals_4, (100, 300), (300, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (10, 100), (100, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 300), (300, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
300), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1200)](buf1, primals_3, 1200, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (300, 100), (
1, 300), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(400)](buf3, primals_5, 400, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(100, 10), (1, 100), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_2[grid(4)](buf4, buf7, 4, 10, XBLOCK=
1, num_warps=2, num_stages=1)
del buf4
return buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4
class LeNet_300_100New(nn.Module):
"""Simple NN with hidden layers [300, 100]
Based on https://github.com/mi-lad/snip/blob/master/train.py
by Milad Alizadeh.
"""
def __init__(self, save_features=None, bench_model=False):
super(LeNet_300_100New, self).__init__()
self.fc1 = nn.Linear(28 * 28, 300, bias=True)
self.fc2 = nn.Linear(300, 100, bias=True)
self.fc3 = nn.Linear(100, 10, bias=True)
self.mask = None
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
|
elony314/sparse_learning
|
LeNet_300_100
| false
| 12,343
|
[
"MIT"
] | 0
|
fff9ea0267016bda747f2882ef8de508ac1369e7
|
https://github.com/elony314/sparse_learning/tree/fff9ea0267016bda747f2882ef8de508ac1369e7
|
Attention
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, device, hidden_size):
super(Attention, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.concat_linear = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.attn = nn.Linear(self.hidden_size, hidden_size)
def forward(self, rnn_outputs, final_hidden_state):
_batch_size, _seq_len, _ = rnn_outputs.shape
attn_weights = self.attn(rnn_outputs)
attn_weights = torch.bmm(attn_weights, final_hidden_state.unsqueeze(2))
attn_weights = F.softmax(attn_weights.squeeze(2), dim=1)
context = torch.bmm(rnn_outputs.transpose(1, 2), attn_weights.
unsqueeze(2)).squeeze(2)
attn_hidden = torch.tanh(self.concat_linear(torch.cat((context,
final_hidden_state), dim=1)))
return attn_hidden, attn_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'device': 0, 'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 1), 0), out
=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0), out=buf4
)
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_2[grid(32)](buf4, primals_4, buf5, 32, XBLOCK=
32, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
del buf4
extern_kernels.mm(buf5, reinterpret_tensor(primals_5, (8, 4), (1, 8
), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_tanh_3[grid(16)](buf7, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
return (buf7, buf3, primals_1, buf3, buf5, buf7, primals_5,
reinterpret_tensor(primals_4, (4, 1, 4), (4, 1, 1), 0))
class AttentionNew(nn.Module):
def __init__(self, device, hidden_size):
super(AttentionNew, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.concat_linear = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.attn = nn.Linear(self.hidden_size, hidden_size)
def forward(self, input_0, input_1):
primals_5 = self.concat_linear.weight
primals_3 = self.concat_linear.bias
primals_2 = self.attn.weight
primals_6 = self.attn.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
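# Usage sketch, assuming a CUDA device; the kernels above expect a (4, 4, 4)
# rnn_outputs tensor and a (4, 4) final hidden state, as asserted in call().
if __name__ == '__main__':
    attn = AttentionNew(device='cuda', hidden_size=4).cuda()
    rnn_outputs = torch.rand(4, 4, 4, device='cuda')
    final_hidden = torch.rand(4, 4, device='cuda')
    attn_hidden, attn_weights = attn(rnn_outputs, final_hidden)
    print(attn_hidden.shape, attn_weights.shape)  # (4, 4) and (4, 4)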
|
ekvall93/tape
|
Attention
| false
| 12,344
|
[
"BSD-3-Clause"
] | 0
|
1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
https://github.com/ekvall93/tape/tree/1ca4d5a39c72f806f23a36fb7a7c7325f06096ae
|
L2Norm
|
import torch
import torch.nn as nn
import torch.onnx
class L2Norm(nn.Module):
"""
    Scale shall be learnable according to the original paper
scale: initial scale number
chan_num: L2Norm channel number (norm over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2Norm, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, data):
return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(
min=1e-12).rsqrt()
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 2048 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 512
x3 = xindex
x0 = xindex % 16
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, None)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16),
torch.float32)
get_raw_stream(0)
triton_red_fused_pow_sum_0[grid(256)](primals_2, buf0, 256, 128,
XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_per_fused_clamp_pow_rsqrt_sum_1[grid(64)](buf2, buf0, 64, 4,
XBLOCK=64, num_warps=2, num_stages=1)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_2[grid(32768)](primals_1, primals_2, buf2,
buf3, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf3, primals_2, buf2
class L2NormNew(nn.Module):
"""
    Scale shall be learnable according to the original paper
scale: initial scale number
chan_num: L2Norm channel number (norm over all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2NormNew, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
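# Usage sketch (assumes a CUDA device; the reduction kernels above are
# specialized to a (4, 512, 4, 4) input, matching the default chan_num=512).
if __name__ == '__main__':
    norm = L2NormNew().cuda()
    x = torch.rand(4, 512, 4, 4, device='cuda')
    y = norm(x)
    print(y.shape)  # torch.Size([4, 512, 4, 4])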
|
ephrem-git/inference
|
L2Norm
| false
| 12,345
|
[
"Apache-2.0"
] | 0
|
bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
https://github.com/ephrem-git/inference/tree/bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
Get_gradient_nopadding
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Get_gradient_nopadding(nn.Module):
def __init__(self):
super(Get_gradient_nopadding, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, x):
x_list = []
for i in range(x.shape[1]):
x_i = x[:, i]
x_i_v = F.conv2d(x_i.unsqueeze(1), self.weight_v, padding=1)
x_i_h = F.conv2d(x_i.unsqueeze(1), self.weight_h, padding=1)
x_i = torch.sqrt(torch.pow(x_i_v, 2) + torch.pow(x_i_h, 2) + 1e-06)
x_list.append(x_i)
x = torch.cat(x_list, dim=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 1e-06
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 2, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 16 * x2), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 + tmp10
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp18, tmp25, tmp26)
tmp28 = tmp0 >= tmp16
tmp29 = tl.full([1], 3, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tmp28 & tmp30
tmp32 = tl.load(in_ptr4 + (x0 + 16 * x2), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp33 = tmp32 * tmp32
tmp34 = tl.load(in_ptr5 + (x0 + 16 * x2), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp36 + tmp10
tmp38 = libdevice.sqrt(tmp37)
tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
tmp40 = tl.where(tmp31, tmp38, tmp39)
tmp41 = tmp0 >= tmp29
tl.full([1], 4, tl.int64)
tmp44 = tl.load(in_ptr6 + (x0 + 16 * x2), tmp41 & xmask,
eviction_policy='evict_last', other=0.0)
tmp45 = tmp44 * tmp44
tmp46 = tl.load(in_ptr7 + (x0 + 16 * x2), tmp41 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = tmp46 * tmp46
tmp48 = tmp45 + tmp47
tmp49 = tmp48 + tmp10
tmp50 = libdevice.sqrt(tmp49)
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp41, tmp50, tmp51)
tmp53 = tl.where(tmp31, tmp40, tmp52)
tmp54 = tl.where(tmp18, tmp27, tmp53)
tmp55 = tl.where(tmp4, tmp14, tmp54)
tl.store(out_ptr0 + x3, tmp55, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(arg2_1, (1, 1, 3, 3), (9, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(1, 1),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 0), arg2_1, stride=(1, 1), padding=(1, 1),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 16), arg2_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
buf5 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 32), arg2_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 48), arg1_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
del arg1_1
buf7 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
4, 4), (64, 0, 4, 1), 48), arg2_1, stride=(1, 1), padding=(1, 1
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf7, (4, 1, 4, 4), (16, 16, 4, 1))
del arg0_1
del arg2_1
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](buf0, buf1, buf2, buf3, buf4,
buf5, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del buf4
del buf5
del buf6
del buf7
return buf8,
class Get_gradient_nopaddingNew(nn.Module):
def __init__(self):
super(Get_gradient_nopaddingNew, self).__init__()
kernel_v = [[0, -1, 0], [0, 0, 0], [0, 1, 0]]
kernel_h = [[0, 0, 0], [-1, 0, 1], [0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False)
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False)
def forward(self, input_0):
arg1_1 = self.weight_h
arg2_1 = self.weight_v
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
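# Usage sketch, assuming a CUDA device; the fused cat kernel above expects a
# (4, 4, 4, 4) input (four channels, one pair of convolutions per channel).
if __name__ == '__main__':
    grad = Get_gradient_nopaddingNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    edges = grad(x)
    print(edges.shape)  # torch.Size([4, 4, 4, 4]) gradient magnitudes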
|
eqprog/ESRGAN
|
Get_gradient_nopadding
| false
| 12,346
|
[
"Apache-2.0"
] | 0
|
d5eb02531cf0ce4e8df93793f3012486bac8d87a
|
https://github.com/eqprog/ESRGAN/tree/d5eb02531cf0ce4e8df93793f3012486bac8d87a
|
MultiheadAttention
|
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
incremental_state=None, need_weights=True, static_kv=False,
attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1)
], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if saved_state is not None:
if 'prev_key' in saved_state:
prev_key = saved_state['prev_key'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
k = torch.cat((prev_key, k), dim=1)
if 'prev_value' in saved_state:
prev_value = saved_state['prev_value'].view(bsz * self.
num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
v = torch.cat((prev_value, v), dim=1)
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.
head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1,
self.head_dim)
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat([key_padding_mask, torch.zeros
(key_padding_mask.size(0), 1).type_as(key_padding_mask)
], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
if self.onnx_trace:
attn_weights = torch.where(key_padding_mask.unsqueeze(1).
unsqueeze(2), torch.Tensor([float('-Inf')]),
attn_weights.float()).type_as(attn_weights)
else:
attn_weights = attn_weights.float().masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf')
).type_as(attn_weights)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=
self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz,
embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
else:
attn_weights = None
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
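    # Note: `utils` in the two helpers below refers to fairseq.utils, which is
    # not imported in this standalone snippet; they are only exercised when an
    # incremental_state is supplied.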
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
import torch.utils.data
from torch.nn import Parameter
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=32,
num_warps=4, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
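    # Note: `utils` in the two helpers below refers to fairseq.utils, which is
    # not imported in this standalone snippet; they are only exercised when an
    # incremental_state is supplied.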
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
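# Usage sketch, assuming a CUDA device; the compiled graph asserts a (4, 4, 4)
# query and (4, 4, 4, 4) key/value tensors, as checked in call() above.
if __name__ == '__main__':
    mha = MultiheadAttentionNew(embed_dim=4, num_heads=4).cuda()
    query = torch.rand(4, 4, 4, device='cuda')
    key = torch.rand(4, 4, 4, 4, device='cuda')
    value = torch.rand(4, 4, 4, 4, device='cuda')
    out, weights = mha(query, key, value)
    print(out.shape, weights.shape)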
|
edbltn/fairseq
|
MultiheadAttention
| false
| 12,347
|
[
"BSD-3-Clause"
] | 0
|
e4d25fd96f1e38190400dbbdbc77eeda71ac50a0
|
https://github.com/edbltn/fairseq/tree/e4d25fd96f1e38190400dbbdbc77eeda71ac50a0
|
StackTime
|
import torch
import torch.onnx
class StackTime(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, x, x_lens):
seq = [x]
for i in range(1, self.factor):
tmp = torch.zeros_like(x)
tmp[:-i, :, :] = x[i:, :, :]
seq.append(tmp)
x_lens = torch.ceil(x_lens.float() / self.factor).int()
return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'factor': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x0 = xindex % 4
x4 = xindex // 64
x3 = xindex // 256
x2 = xindex // 64 % 4
x5 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x4), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x3
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp12 & tmp9
tmp14 = tl.load(in_ptr0 + (64 + x0 + 4 * (-4 + x1) + 16 * x4), tmp13 &
xmask, other=0.0)
tmp15 = 0.0
tmp16 = tl.where(tmp12, tmp14, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp9, tmp16, tmp17)
tmp19 = tmp0 >= tmp7
tmp20 = tl.full([1], 12, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.full([1], 2, tl.int64)
tmp24 = tmp10 < tmp23
tmp25 = tmp24 & tmp22
tmp26 = tl.load(in_ptr0 + (128 + x0 + 4 * (-8 + x1) + 16 * x4), tmp25 &
xmask, other=0.0)
tmp27 = tl.where(tmp24, tmp26, tmp15)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp22, tmp27, tmp28)
tmp30 = tmp0 >= tmp20
tl.full([1], 16, tl.int64)
tmp33 = tl.full([1], 1, tl.int64)
tmp34 = tmp10 < tmp33
tmp35 = tmp34 & tmp30
tmp36 = tl.load(in_ptr0 + (192 + x0 + 4 * (-12 + x1) + 16 * x2), tmp35 &
xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.where(tmp34, tmp36, tmp15)
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp30, tmp37, tmp38)
tmp40 = tl.where(tmp22, tmp29, tmp39)
tmp41 = tl.where(tmp9, tmp18, tmp40)
tmp42 = tl.where(tmp4, tmp5, tmp41)
tl.store(out_ptr0 + x5, tmp42, xmask)
@triton.jit
def triton_poi_fused__to_copy_ceil_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = libdevice.ceil(tmp2)
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16, 4), (256, 64, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
triton_poi_fused__to_copy_ceil_div_1[grid(256)](arg1_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
return reinterpret_tensor(buf0, (1, 4, 16, 4), (1024, 64, 4, 1), 0), buf1
class StackTimeNew(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
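# Usage sketch, assuming a CUDA device; both inputs must be (4, 4, 4, 4)
# tensors, matching the asserts in call() above.
if __name__ == '__main__':
    stack = StackTimeNew(4)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    x_lens = torch.rand(4, 4, 4, 4, device='cuda')
    stacked, new_lens = stack(x, x_lens)
    print(stacked.shape, new_lens.dtype)  # time-stacked frames, int32 lengths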
|
ephrem-git/inference
|
StackTime
| false
| 12,348
|
[
"Apache-2.0"
] | 0
|
bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
https://github.com/ephrem-git/inference/tree/bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
SharpenedCosineSimilarity
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def unfold2d(x, kernel_size: 'int', stride: 'int', padding: 'int'):
x = F.pad(x, [padding] * 4)
bs, in_c, h, w = x.size()
ks = kernel_size
strided_x = x.as_strided((bs, in_c, (h - ks) // stride + 1, (w - ks) //
stride + 1, ks, ks), (in_c * h * w, h * w, stride * w, stride, w, 1))
return strided_x
class SharpenedCosineSimilarity(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarity, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
nn.init.xavier_uniform_(w)
self.w = nn.Parameter(w.view(out_channels, in_channels, -1),
requires_grad=True)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, x):
x = unfold2d(x, kernel_size=self.kernel_size, stride=self.stride,
padding=self.padding)
n, c, h, w, _, _ = x.shape
x = x.reshape(n, c, h, w, -1)
square_sum = torch.sum(torch.square(x), [1, 4], keepdim=True)
x_norm = torch.add(torch.sqrt(square_sum + self.eps), torch.square(
self.q / self.q_scale))
square_sum = torch.sum(torch.square(self.w), [1, 2], keepdim=True)
w_norm = torch.add(torch.sqrt(square_sum + self.eps), torch.square(
self.q / self.q_scale))
x = torch.einsum('nchwl,vcl->nvhw', x / x_norm, self.w / w_norm)
sign = torch.sign(x)
x = torch.abs(x) + self.eps
x = x.pow(torch.square(self.p / self.p_scale).view(1, -1, 1, 1))
return sign * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_abs_add_div_mul_pow_sign_sqrt_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr1 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp27 = tl.load(in_ptr2 + 0)
tmp28 = tl.broadcast_to(tmp27, [XBLOCK])
tmp44 = tl.load(in_ptr3 + 0)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1e-12
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp16 = 0.01
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp13 + tmp18
tmp20 = tmp0 / tmp19
tmp21 = tmp2 / tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp5 / tmp19
tmp24 = tmp22 + tmp23
tmp25 = tmp8 / tmp19
tmp26 = tmp24 + tmp25
tmp29 = tmp28 * tmp28
tmp30 = tmp29 + tmp11
tmp31 = libdevice.sqrt(tmp30)
tmp32 = tmp31 + tmp18
tmp33 = tmp28 / tmp32
tmp34 = tmp26 * tmp33
tmp35 = tl.full([1], 0, tl.int32)
tmp36 = tmp35 < tmp34
tmp37 = tmp36.to(tl.int8)
tmp38 = tmp34 < tmp35
tmp39 = tmp38.to(tl.int8)
tmp40 = tmp37 - tmp39
tmp41 = tmp40.to(tmp34.dtype)
tmp42 = tl_math.abs(tmp34)
tmp43 = tmp42 + tmp11
tmp46 = 0.1
tmp47 = tmp45 * tmp46
tmp48 = tmp47 * tmp47
tmp49 = libdevice.pow(tmp43, tmp48)
tmp50 = tmp41 * tmp49
tl.store(in_out_ptr0 + x2, tmp50, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (1, 1, 1), (1, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 1), (16, 64, 4, 1, 64),
torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4, 1, 1), (16, 64, 4, 1,
64, 64), 0)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_abs_add_div_mul_pow_sign_sqrt_sum_0[grid(64)](buf2,
primals_1, primals_2, primals_3, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, primals_1, primals_2, primals_3, primals_4
def unfold2d(x, kernel_size: 'int', stride: 'int', padding: 'int'):
x = F.pad(x, [padding] * 4)
bs, in_c, h, w = x.size()
ks = kernel_size
strided_x = x.as_strided((bs, in_c, (h - ks) // stride + 1, (w - ks) //
stride + 1, ks, ks), (in_c * h * w, h * w, stride * w, stride, w, 1))
return strided_x
class SharpenedCosineSimilarityNew(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarityNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
nn.init.xavier_uniform_(w)
self.w = nn.Parameter(w.view(out_channels, in_channels, -1),
requires_grad=True)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, input_0):
primals_3 = self.w
primals_2 = self.p
primals_4 = self.q
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
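# Usage sketch, assuming a CUDA device; the fused kernel above is specialized
# to a (4, 4, 4, 4) input with the default 1x1 kernel and single output channel.
if __name__ == '__main__':
    scs = SharpenedCosineSimilarityNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = scs(x)
    print(y.shape)  # torch.Size([4, 1, 4, 4])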
|
enzokro/sharpened_cosine_similarity_torch
|
SharpenedCosineSimilarity
| false
| 12,349
|
[
"MIT"
] | 0
|
150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
https://github.com/enzokro/sharpened_cosine_similarity_torch/tree/150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
PredictionHead
|
import torch
import torch.nn as nn
import torch.onnx
class PredictionHead(nn.Module):
def __init__(self, in_channels, num_classes, num_anchors):
super(PredictionHead, self).__init__()
self.classification = nn.Conv2d(in_channels, num_classes *
num_anchors, kernel_size=1)
self.regression = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1
)
self.num_classes = num_classes
self.num_anchors = num_anchors
def forward(self, x):
bs = x.shape[0]
class_logits = self.classification(x)
box_regression = self.regression(x)
class_logits = class_logits.permute(0, 2, 3, 1).reshape(bs, -1,
self.num_classes)
box_regression = box_regression.permute(0, 2, 3, 1).reshape(bs, -1, 4)
return class_logits, box_regression
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'num_classes': 4, 'num_anchors': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_out_ptr0, in_ptr0, in_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 256 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 16, 4, 4), (256, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
buf3 = reinterpret_tensor(buf2, (4, 64, 4), (256, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(64, 16)](buf3, buf0,
primals_3, 64, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf4, (4, 64, 4), (256, 4, 1), 0)
del buf4
triton_poi_fused__unsafe_view_clone_0[grid(64, 16)](buf5, buf1,
primals_5, 64, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
del primals_5
return buf3, buf5, primals_1, primals_2, primals_4
class PredictionHeadNew(nn.Module):
def __init__(self, in_channels, num_classes, num_anchors):
super(PredictionHeadNew, self).__init__()
self.classification = nn.Conv2d(in_channels, num_classes *
num_anchors, kernel_size=1)
self.regression = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1
)
self.num_classes = num_classes
self.num_anchors = num_anchors
def forward(self, input_0):
primals_2 = self.classification.weight
primals_3 = self.classification.bias
primals_4 = self.regression.weight
primals_5 = self.regression.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
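# Usage sketch, assuming a CUDA device; call() above expects a (4, 4, 4, 4)
# feature map for in_channels=4, num_classes=4, num_anchors=4.
if __name__ == '__main__':
    head = PredictionHeadNew(in_channels=4, num_classes=4, num_anchors=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    class_logits, box_regression = head(x)
    print(class_logits.shape, box_regression.shape)  # (4, 64, 4) and (4, 64, 4)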
|
ephrem-git/inference
|
PredictionHead
| false
| 12,350
|
[
"Apache-2.0"
] | 0
|
bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
https://github.com/ephrem-git/inference/tree/bfbda5fc419364c3f71b5b1640f6c00e7675b212
|
ExtClassifier
|
import torch
import torch.nn as nn
import torch.cuda
import torch.distributed
class ExtClassifier(nn.Module):
def __init__(self, hidden_size):
super(ExtClassifier, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask=None):
h = self.linear1(x).squeeze(-1)
        if mask is not None:
sent_scores = self.sigmoid(h) * mask.float()
else:
sent_scores = self.sigmoid(h)
return sent_scores
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.cuda
import torch.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class ExtClassifierNew(nn.Module):
def __init__(self, hidden_size):
super(ExtClassifierNew, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
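# Usage sketch, assuming a CUDA device; the compiled path covers the mask-free
# case only, with a (4, 4, 4, 4) input and hidden_size=4.
if __name__ == '__main__':
    clf = ExtClassifierNew(hidden_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    scores = clf(x)
    print(scores.shape)  # torch.Size([4, 4, 4]) of sigmoid scores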
|
eric-zhizu/OpenNMT-kpg-release
|
ExtClassifier
| false
| 12,351
|
[
"MIT"
] | 0
|
9f15dea6f663425eef2157845c4c8042ad845c11
|
https://github.com/eric-zhizu/OpenNMT-kpg-release/tree/9f15dea6f663425eef2157845c4c8042ad845c11
|
SharpenedCosineSimilarity_ConvImpl
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SharpenedCosineSimilarity_ConvImpl(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarity_ConvImpl, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
nn.init.xavier_uniform_(w)
self.w = nn.Parameter(w.view(out_channels, in_channels, -1),
requires_grad=True)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, x):
w = self.w.reshape(self.out_channels, self.in_channels, self.
kernel_size, self.kernel_size)
w_norm = torch.linalg.vector_norm(w, dim=(1, 2, 3), keepdim=True)
q_sqr = (self.q / self.q_scale) ** 2
w_normed = w / (w_norm + self.eps + q_sqr)
x_norm_squared = F.avg_pool2d(x ** 2, kernel_size=self.kernel_size,
stride=self.stride, padding=self.padding, divisor_override=1).sum(
dim=1, keepdim=True)
y_denorm = F.conv2d(x, w_normed, bias=None, stride=self.stride,
padding=self.padding)
y = y_denorm / ((x_norm_squared + self.eps).sqrt() + q_sqr)
sign = torch.sign(y)
y = torch.abs(y) + self.eps
p_sqr = (self.p / self.p_scale) ** 2
y = y.pow(p_sqr.reshape(1, -1, 1, 1))
return sign * y
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp3 = libdevice.sqrt(tmp2)
tmp4 = 1e-12
tmp5 = tmp3 + tmp4
tmp8 = 0.01
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tmp1 / tmp11
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp12, None)
@triton.jit
def triton_poi_fused_abs_add_avg_pool2d_div_mul_pow_sign_sqrt_sum_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp7 = tl.load(in_ptr1 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp13 = tl.load(in_ptr2 + x0, None)
tmp24 = tl.load(in_ptr3 + 0)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 1e-12
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 / tmp12
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = tmp15 < tmp14
tmp17 = tmp16.to(tl.int8)
tmp18 = tmp14 < tmp15
tmp19 = tmp18.to(tl.int8)
tmp20 = tmp17 - tmp19
tmp21 = tmp20.to(tmp14.dtype)
tmp22 = tl_math.abs(tmp14)
tmp23 = tmp22 + tmp4
tmp26 = 0.1
tmp27 = tmp25 * tmp26
tmp28 = tmp27 * tmp27
tmp29 = libdevice.pow(tmp23, tmp28)
tmp30 = tmp21 * tmp29
tl.store(out_ptr0 + x0, tmp12, None)
tl.store(out_ptr1 + x0, tmp30, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(1)](primals_1,
primals_2, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf2 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
triton_poi_fused_abs_add_avg_pool2d_div_mul_pow_sign_sqrt_sum_1[grid
(16384)](primals_3, primals_2, buf1, primals_4, buf2, buf3,
16384, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, primals_4, buf0, buf1, buf2
class SharpenedCosineSimilarity_ConvImplNew(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarity_ConvImplNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
nn.init.xavier_uniform_(w)
self.w = nn.Parameter(w.view(out_channels, in_channels, -1),
requires_grad=True)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, input_0):
primals_1 = self.w
primals_2 = self.p
primals_4 = self.q
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
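# Minimal usage sketch for the Triton-backed wrapper (assumes a CUDA device and a
# working Triton/inductor install, as the imports above require); illustrative names only.
if __name__ == '__main__' and torch.cuda.is_available():
    _m = SharpenedCosineSimilarity_ConvImplNew().cuda()
    _y = _m(torch.rand(4, 1, 64, 64, device='cuda'))
    assert _y.shape == (4, 1, 64, 64)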
|
enzokro/sharpened_cosine_similarity_torch
|
SharpenedCosineSimilarity_ConvImpl
| false
| 12,352
|
[
"MIT"
] | 0
|
150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
https://github.com/enzokro/sharpened_cosine_similarity_torch/tree/150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
SharpenedCosineSimilarityAnnotated
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SharpenedCosineSimilarityAnnotated(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarityAnnotated, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
self.register_parameter('w', nn.Parameter(w))
nn.init.xavier_uniform_(self.w)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, x):
w_norm = torch.linalg.vector_norm(self.w, dim=(1, 2, 3), keepdim=True)
q_sqr = (self.q / self.q_scale) ** 2
w_normed = self.w / (w_norm + self.eps + q_sqr)
x_norm_squared = F.avg_pool2d((x + self.eps) ** 2, kernel_size=self
.kernel_size, stride=self.stride, padding=self.padding,
divisor_override=1).sum(dim=1, keepdim=True)
y_denorm = F.conv2d(x, w_normed, bias=None, stride=self.stride,
padding=self.padding)
y = y_denorm / (x_norm_squared.sqrt() + q_sqr)
sign = torch.sign(y)
y = torch.abs(y) + self.eps
p_sqr = (self.p / self.p_scale) ** 2
y = y.pow(p_sqr.reshape(1, -1, 1, 1))
return sign * y
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
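# A minimal sketch, not part of the original file. This variant differs from
# SharpenedCosineSimilarity_ConvImpl above in two small ways: w is stored as a 4-D
# parameter (no reshape in forward), and eps is added to x before squaring rather than
# to the squared norm. Illustrative names only.
if __name__ == '__main__':
    _scs = SharpenedCosineSimilarityAnnotated()
    assert _scs(*get_inputs()).shape == (4, 1, 64, 64)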
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp3 = libdevice.sqrt(tmp2)
tmp4 = 1e-12
tmp5 = tmp3 + tmp4
tmp8 = 0.01
tmp9 = tmp7 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp5 + tmp10
tmp12 = tmp1 / tmp11
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp12, None)
@triton.jit
def triton_poi_fused_abs_add_avg_pool2d_div_mul_pow_sign_sqrt_sum_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp7 = tl.load(in_ptr1 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp13 = tl.load(in_ptr2 + x0, None)
tmp24 = tl.load(in_ptr3 + 0)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK])
tmp1 = 1e-12
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 / tmp12
tmp15 = tl.full([1], 0, tl.int32)
tmp16 = tmp15 < tmp14
tmp17 = tmp16.to(tl.int8)
tmp18 = tmp14 < tmp15
tmp19 = tmp18.to(tl.int8)
tmp20 = tmp17 - tmp19
tmp21 = tmp20.to(tmp14.dtype)
tmp22 = tl_math.abs(tmp14)
tmp23 = tmp22 + tmp1
tmp26 = 0.1
tmp27 = tmp25 * tmp26
tmp28 = tmp27 * tmp27
tmp29 = libdevice.pow(tmp23, tmp28)
tmp30 = tmp21 * tmp29
tl.store(out_ptr0 + x0, tmp12, None)
tl.store(out_ptr1 + x0, tmp30, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 1, 1), (1, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(1)](primals_1,
primals_2, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf2 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.float32)
triton_poi_fused_abs_add_avg_pool2d_div_mul_pow_sign_sqrt_sum_1[grid
(16384)](primals_3, primals_2, buf1, primals_4, buf2, buf3,
16384, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, primals_4, buf0, buf1, buf2
class SharpenedCosineSimilarityAnnotatedNew(nn.Module):
def __init__(self, in_channels=1, out_channels=1, kernel_size=1, stride
=1, padding=0, eps=1e-12):
super(SharpenedCosineSimilarityAnnotatedNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.eps = eps
self.padding = int(padding)
w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
self.register_parameter('w', nn.Parameter(w))
nn.init.xavier_uniform_(self.w)
self.p_scale = 10
p_init = 2 ** 0.5 * self.p_scale
self.register_parameter('p', nn.Parameter(torch.empty(out_channels)))
nn.init.constant_(self.p, p_init)
self.q_scale = 100
self.register_parameter('q', nn.Parameter(torch.empty(1)))
nn.init.constant_(self.q, 10)
def forward(self, input_0):
primals_1 = self.w
primals_2 = self.p
primals_4 = self.q
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
enzokro/sharpened_cosine_similarity_torch
|
SharpenedCosineSimilarityAnnotated
| false
| 12,353
|
[
"MIT"
] | 0
|
150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
https://github.com/enzokro/sharpened_cosine_similarity_torch/tree/150c84f5cf81721baf097abdc0d4ac772fb39fc4
|
Divide
|
import torch
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
class Divide(torch.nn.Module):
""" Divide module for a functional divide"""
def forward(self, x, y):
"""
Forward-pass routine for divide op
"""
return torch.div(x, y)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
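# A tiny check, not part of the original file (CPU only, no Triton needed): the module
# is plain elementwise division. Illustrative names only.
if __name__ == '__main__':
    _x, _y = get_inputs()
    assert torch.allclose(Divide()(_x, _y), _x / _y)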
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 / tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class DivideNew(torch.nn.Module):
""" Divide module for a functional divide"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
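# Usage sketch, not part of the original file (assumes CUDA and Triton). Note that
# call() launches the fused kernel with (arg1_1, arg0_1), so as written DivideNew
# returns input_1 / input_0, the reverse operand order of the eager torch.div(x, y).
if __name__ == '__main__' and torch.cuda.is_available():
    _a = torch.rand(4, 4, 4, 4, device='cuda')
    _b = torch.rand(4, 4, 4, 4, device='cuda')
    assert DivideNew()(_a, _b).shape == (4, 4, 4, 4)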
|
arjunsuresh/aimet
|
Divide
| false
| 12,354
|
[
"BSD-3-Clause"
] | 0
|
f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
https://github.com/arjunsuresh/aimet/tree/f6e09cb07a91eed3a5e6b8e19e6b065303af5a39
|
EqualLinear
|
import torch
import torch.nn.functional as F
from torch import nn
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias *
self.lr_mul)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
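# A minimal CPU check, not part of the original file: with the default lr_mul=1 the
# layer reduces to a plain F.linear over the last dimension. Illustrative names only.
if __name__ == '__main__':
    _m = EqualLinear(4, 4)
    _x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(_m(_x), F.linear(_x, _m.weight, _m.bias))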
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf0
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul=1, bias=True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ericguizzo/stylegan2-pytorch
|
EqualLinear
| false
| 12,355
|
[
"MIT"
] | 0
|
d6e5cf4e30247e12d330537676f9ba63867cfaa0
|
https://github.com/ericguizzo/stylegan2-pytorch/tree/d6e5cf4e30247e12d330537676f9ba63867cfaa0
|
ScaleNorm
|
import math
import torch
import torch.nn as nn
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'scale': 1.0}]
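# A minimal CPU check, not part of the original file: in effect
# out = g * x / max(||x||_2, eps) along the last dim, with g initialised to sqrt(scale).
# Illustrative names only.
if __name__ == '__main__':
    _sn = ScaleNorm(4.0)
    _x = torch.rand(4, 4, 4, 4)
    _ref = _sn.scale * _x / torch.norm(_x, dim=-1, keepdim=True).clamp(min=_sn.eps)
    assert torch.allclose(_sn(_x), _ref)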
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = 1e-05
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp1 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(64)](primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_div_linalg_vector_norm_mul_1[grid(256)](
primals_2, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return buf1, primals_2
class ScaleNormNew(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNormNew, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
|
eweiner/MAT_Extension
|
ScaleNorm
| false
| 12,356
|
[
"MIT"
] | 0
|
505884a67f97bf54e1198077d15a48531fcac7a5
|
https://github.com/eweiner/MAT_Extension/tree/505884a67f97bf54e1198077d15a48531fcac7a5
|
Generator
|
import math
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(Generator, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, x, mask):
mask = mask.unsqueeze(-1).float()
out_masked = x * mask
if self.aggregation_type == 'mean':
out_sum = out_masked.sum(dim=1)
mask_sum = mask.sum(dim=1)
out_avg_pooling = out_sum / mask_sum
elif self.aggregation_type == 'sum':
out_sum = out_masked.sum(dim=1)
out_avg_pooling = out_sum
elif self.aggregation_type == 'dummy_node':
out_avg_pooling = out_masked[:, 0]
projected = self.proj(out_avg_pooling)
return projected
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
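# A minimal CPU sketch, not part of the original file, for the default configuration
# (n_layers=1, 'mean' aggregation): masked features are summed over dim=1, divided by
# the mask sum, then passed through the single Linear head. Illustrative names only.
if __name__ == '__main__':
    _gen = Generator(d_model=4)
    _x, _mask = get_inputs()
    assert _gen(_x, _mask).shape == (4, 4, 4, 1)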
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x1 = xindex // 4 % 16
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tmp1 + tmp4
tmp16 = tmp15 + tmp8
tmp17 = tmp16 + tmp12
tmp18 = tmp14 / tmp17
tl.store(out_ptr0 + x4, tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_3
del primals_4
return reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-06):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class ScaleNorm(nn.Module):
"""ScaleNorm"""
"""All g’s in SCALE NORM are initialized to sqrt(d)"""
def __init__(self, scale, eps=1e-05):
super(ScaleNorm, self).__init__()
self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
self.eps = eps
def forward(self, x):
norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=
self.eps)
return x * norm
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, aggregation_type='mean', n_output=1,
n_layers=1, leaky_relu_slope=0.01, dropout=0.0, scale_norm=False):
super(GeneratorNew, self).__init__()
if n_layers == 1:
self.proj = nn.Linear(d_model, n_output)
else:
self.proj = []
for i in range(n_layers - 1):
self.proj.append(nn.Linear(d_model, d_model))
self.proj.append(nn.LeakyReLU(leaky_relu_slope))
self.proj.append(ScaleNorm(d_model) if scale_norm else
LayerNorm(d_model))
self.proj.append(nn.Dropout(dropout))
self.proj.append(nn.Linear(d_model, n_output))
self.proj = torch.nn.Sequential(*self.proj)
self.aggregation_type = aggregation_type
def forward(self, input_0, input_1):
primals_3 = self.proj.weight
primals_4 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
eweiner/MAT_Extension
|
Generator
| false
| 12,357
|
[
"MIT"
] | 0
|
505884a67f97bf54e1198077d15a48531fcac7a5
|
https://github.com/eweiner/MAT_Extension/tree/505884a67f97bf54e1198077d15a48531fcac7a5
|
SAB
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MAB(nn.Module):
def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
super(MAB, self).__init__()
self.dim_V = dim_V
self.num_heads = num_heads
self.fc_q = nn.Linear(dim_Q, dim_V)
self.fc_k = nn.Linear(dim_K, dim_V)
self.fc_v = nn.Linear(dim_K, dim_V)
if ln:
self.ln0 = nn.LayerNorm(dim_V)
self.ln1 = nn.LayerNorm(dim_V)
self.fc_o = nn.Linear(dim_V, dim_V)
def forward(self, Q, K):
Q = self.fc_q(Q)
K, V = self.fc_k(K), self.fc_v(K)
dim_split = self.dim_V // self.num_heads
Q_ = torch.cat(Q.split(dim_split, 2), 0)
K_ = torch.cat(K.split(dim_split, 2), 0)
V_ = torch.cat(V.split(dim_split, 2), 0)
A = torch.softmax(Q_.bmm(K_.transpose(1, 2)) / math.sqrt(self.dim_V), 2
)
O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
O = O if getattr(self, 'ln0', None) is None else self.ln0(O)
O = O + F.relu(self.fc_o(O))
O = O if getattr(self, 'ln1', None) is None else self.ln1(O)
return O
class SAB(nn.Module):
def __init__(self, dim_in, dim_out, num_heads, ln=False):
super(SAB, self).__init__()
self.mab = MAB(dim_in, dim_in, dim_out, num_heads, ln=ln)
def forward(self, X):
return self.mab(X, X)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4, 'num_heads': 4}]
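# A minimal CPU sketch, not part of the original file: SAB is self-attention, MAB(X, X);
# with dim_out=4 and num_heads=4 each head attends over one feature channel.
# Illustrative names only.
if __name__ == '__main__':
    _sab = SAB(dim_in=4, dim_out=4, num_heads=4)
    assert _sab(torch.rand(4, 4, 4)).shape == (4, 4, 4)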
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (16 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (16 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (32 + x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (32 + x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 4, tl.int64)
tmp31 = tl.load(in_ptr0 + (48 + x1), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.load(in_ptr1 + (48 + x1), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + x2, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp7 = 0.0
tmp8 = tmp5 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 64), 0)
del buf0
triton_poi_fused_cat_0[grid(64)](buf2, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
triton_poi_fused_cat_0[grid(64)](buf1, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf5, (16, 1, 4), (4, 0,
1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(buf8, buf4, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(64)](buf3, buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_4[grid(64)](buf10,
buf11, primals_9, buf12, buf13, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf11
del primals_9
return buf12, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), buf13, primals_8, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), buf5
class MAB(nn.Module):
def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
super(MAB, self).__init__()
self.dim_V = dim_V
self.num_heads = num_heads
self.fc_q = nn.Linear(dim_Q, dim_V)
self.fc_k = nn.Linear(dim_K, dim_V)
self.fc_v = nn.Linear(dim_K, dim_V)
if ln:
self.ln0 = nn.LayerNorm(dim_V)
self.ln1 = nn.LayerNorm(dim_V)
self.fc_o = nn.Linear(dim_V, dim_V)
def forward(self, Q, K):
Q = self.fc_q(Q)
K, V = self.fc_k(K), self.fc_v(K)
dim_split = self.dim_V // self.num_heads
Q_ = torch.cat(Q.split(dim_split, 2), 0)
K_ = torch.cat(K.split(dim_split, 2), 0)
V_ = torch.cat(V.split(dim_split, 2), 0)
A = torch.softmax(Q_.bmm(K_.transpose(1, 2)) / math.sqrt(self.dim_V), 2
)
O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
O = O if getattr(self, 'ln0', None) is None else self.ln0(O)
O = O + F.relu(self.fc_o(O))
O = O if getattr(self, 'ln1', None) is None else self.ln1(O)
return O
class SABNew(nn.Module):
def __init__(self, dim_in, dim_out, num_heads, ln=False):
super(SABNew, self).__init__()
self.mab = MAB(dim_in, dim_in, dim_out, num_heads, ln=ln)
def forward(self, input_0):
primals_1 = self.mab.fc_q.weight
primals_2 = self.mab.fc_q.bias
primals_4 = self.mab.fc_k.weight
primals_5 = self.mab.fc_k.bias
primals_6 = self.mab.fc_v.weight
primals_7 = self.mab.fc_v.bias
primals_8 = self.mab.fc_o.weight
primals_9 = self.mab.fc_o.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
ernoult/set_transformer
|
SAB
| false
| 12,358
|
[
"MIT"
] | 0
|
4b380106e1f43b7eb6315624c57d4d1d38737b78
|
https://github.com/ernoult/set_transformer/tree/4b380106e1f43b7eb6315624c57d4d1d38737b78
|
MAB
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MAB(nn.Module):
def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
super(MAB, self).__init__()
self.dim_V = dim_V
self.num_heads = num_heads
self.fc_q = nn.Linear(dim_Q, dim_V)
self.fc_k = nn.Linear(dim_K, dim_V)
self.fc_v = nn.Linear(dim_K, dim_V)
if ln:
self.ln0 = nn.LayerNorm(dim_V)
self.ln1 = nn.LayerNorm(dim_V)
self.fc_o = nn.Linear(dim_V, dim_V)
def forward(self, Q, K):
Q = self.fc_q(Q)
K, V = self.fc_k(K), self.fc_v(K)
dim_split = self.dim_V // self.num_heads
Q_ = torch.cat(Q.split(dim_split, 2), 0)
K_ = torch.cat(K.split(dim_split, 2), 0)
V_ = torch.cat(V.split(dim_split, 2), 0)
A = torch.softmax(Q_.bmm(K_.transpose(1, 2)) / math.sqrt(self.dim_V), 2
)
O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
O = O if getattr(self, 'ln0', None) is None else self.ln0(O)
O = O + F.relu(self.fc_o(O))
O = O if getattr(self, 'ln1', None) is None else self.ln1(O)
return O
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_Q': 4, 'dim_K': 4, 'dim_V': 4, 'num_heads': 4}]
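# A minimal CPU sketch, not part of the original file: the query and key/value sets may
# have different sizes, and the output keeps the query set size. Illustrative names only.
if __name__ == '__main__':
    _mab = MAB(dim_Q=4, dim_K=4, dim_V=4, num_heads=4)
    _q, _kv = torch.rand(4, 3, 4), torch.rand(4, 5, 4)
    assert _mab(_q, _kv).shape == (4, 3, 4)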
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-8 + x1)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-12 + x1)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (16 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (16 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (32 + x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tl.load(in_ptr1 + (32 + x1), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 4, tl.int64)
tmp31 = tl.load(in_ptr0 + (48 + x1), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp32 = tl.load(in_ptr1 + (48 + x1), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tl.store(out_ptr0 + x2, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_4(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp7 = 0.0
tmp8 = tmp5 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(primals_6, (16,
4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_7
del primals_8
buf3 = empty_strided_cuda((16, 4, 1), (4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 64), 0)
del buf0
triton_poi_fused_cat_0[grid(64)](buf2, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
triton_poi_fused_cat_0[grid(64)](buf1, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf5, (16, 1, 4), (4, 0,
1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf7
buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(buf8, buf4, out=buf9)
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_cat_3[grid(64)](buf3, buf9, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_4[grid(64)](buf10,
buf11, primals_10, buf12, buf13, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf11
del primals_10
return buf12, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), buf13, primals_9, reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), buf5
class MABNew(nn.Module):
def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
super(MABNew, self).__init__()
self.dim_V = dim_V
self.num_heads = num_heads
self.fc_q = nn.Linear(dim_Q, dim_V)
self.fc_k = nn.Linear(dim_K, dim_V)
self.fc_v = nn.Linear(dim_K, dim_V)
if ln:
self.ln0 = nn.LayerNorm(dim_V)
self.ln1 = nn.LayerNorm(dim_V)
self.fc_o = nn.Linear(dim_V, dim_V)
def forward(self, input_0, input_1):
primals_1 = self.fc_q.weight
primals_2 = self.fc_q.bias
primals_4 = self.fc_k.weight
primals_5 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_9 = self.fc_o.weight
primals_10 = self.fc_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
|
ernoult/set_transformer
|
MAB
| false
| 12,359
|
[
"MIT"
] | 0
|
4b380106e1f43b7eb6315624c57d4d1d38737b78
|
https://github.com/ernoult/set_transformer/tree/4b380106e1f43b7eb6315624c57d4d1d38737b78
|
EdgeFeaturesLayer
|
import torch
import torch.nn as nn
class EdgeFeaturesLayer(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayer, self).__init__()
assert d_model % h == 0
d_model // h
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, x):
p_edge = x.permute(0, 2, 3, 1)
p_edge = self.linear(p_edge).permute(0, 3, 1, 2)
return torch.relu(p_edge)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_edge': 4, 'h': 4, 'dropout': 0.5}]
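# A minimal CPU sketch, not part of the original file: the d_edge channels (dim 1) are
# mixed down to one channel by the 0.25-initialised Linear, then passed through ReLU.
# Illustrative names only.
if __name__ == '__main__':
    _efl = EdgeFeaturesLayer(d_model=4, d_edge=4, h=4, dropout=0.5)
    assert _efl(torch.rand(4, 4, 4, 4)).shape == (4, 1, 4, 4)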
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf3
class EdgeFeaturesLayerNew(nn.Module):
def __init__(self, d_model, d_edge, h, dropout):
super(EdgeFeaturesLayerNew, self).__init__()
assert d_model % h == 0
d_model // h
self.linear = nn.Linear(d_edge, 1, bias=False)
with torch.no_grad():
self.linear.weight.fill_(0.25)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
|
eweiner/MAT_Extension
|
EdgeFeaturesLayer
| false
| 12,360
|
[
"MIT"
] | 0
|
505884a67f97bf54e1198077d15a48531fcac7a5
|
https://github.com/eweiner/MAT_Extension/tree/505884a67f97bf54e1198077d15a48531fcac7a5
|